diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
index 3d8d61f62..10d7c6de7 100644
--- a/.github/workflows/golangci-lint.yml
+++ b/.github/workflows/golangci-lint.yml
@@ -26,7 +26,7 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
- version: v1.59
+ version: v1.59.1
verify:
name: verify
diff --git a/Makefile b/Makefile
index 0aeecbcf2..649be9615 100644
--- a/Makefile
+++ b/Makefile
@@ -79,8 +79,10 @@ GOTESTSUM_PKG := gotest.tools/gotestsum
HADOLINT_VER := v2.10.0
HADOLINT_FAILURE_THRESHOLD = warning
+GOLANGCI_LINT_VER := $(shell grep '[[:space:]]version' .github/workflows/golangci-lint.yml | sed 's/.*version: //')
GOLANGCI_LINT_BIN := golangci-lint
GOLANGCI_LINT := $(abspath $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN))
+GOLANGCI_LINT_PKG := github.com/golangci/golangci-lint/cmd/golangci-lint
GORELEASER_VERSION := v2.0.1
GORELEASER_BIN := goreleaser
@@ -175,7 +177,7 @@ generate-manifests-kubekey: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RB
.PHONY: generate-modules
generate-modules: ## Run go mod tidy to ensure modules are up to date
- go mod tidy
+ @go mod tidy && go mod vendor
.PHONY: generate-goimports
generate-goimports: ## Format all import, `goimports` is required.
@@ -587,22 +589,32 @@ $(GOLANGCI_LINT_BIN): $(GOLANGCI_LINT) ## Build a local copy of golangci-lint
.PHONY: $(GORELEASER)
$(GORELEASER_BIN): $(GORELEASER) ## Build a local copy of golangci-lint
-$(CONTROLLER_GEN): # Build controller-gen from tools folder.
- CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER)
+$(CONTROLLER_GEN): # Build controller-gen into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(CONTROLLER_GEN_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(CONTROLLER_GEN_PKG) $(CONTROLLER_GEN_BIN) $(CONTROLLER_GEN_VER); \
+ fi
-$(GOTESTSUM): # Build gotestsum from tools folder.
- CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER)
+$(GOTESTSUM): # Build gotestsum into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOTESTSUM_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOTESTSUM_PKG) $(GOTESTSUM_BIN) $(GOTESTSUM_VER); \
+ fi
-$(KUSTOMIZE): # Build kustomize from tools folder.
- CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER)
+$(KUSTOMIZE): # Build kustomize into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(KUSTOMIZE_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(KUSTOMIZE_PKG) $(KUSTOMIZE_BIN) $(KUSTOMIZE_VER); \
+ fi
-$(SETUP_ENVTEST): # Build setup-envtest from tools folder.
- GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER)
+$(SETUP_ENVTEST): # Build setup-envtest into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(SETUP_ENVTEST_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(SETUP_ENVTEST_PKG) $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER); \
+ fi
-$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
- hack/ensure-golangci-lint.sh \
- -b $(OUTPUT_TOOLS_DIR) \
- $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
+$(GOLANGCI_LINT): # Build golangci-lint into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GOLANGCI_LINT_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GOLANGCI_LINT_PKG) $(GOLANGCI_LINT_BIN) $(GOLANGCI_LINT_VER); \
+ fi
-$(GORELEASER):
- CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION)
+$(GORELEASER): # Build goreleaser into tools folder.
+ @if [ ! -f $(OUTPUT_TOOLS_DIR)/$(GORELEASER_BIN) ]; then \
+ CGO_ENABLED=0 GOBIN=$(OUTPUT_TOOLS_DIR) $(GO_INSTALL) $(GORELEASER_PKG) $(GORELEASER_BIN) $(GORELEASER_VERSION); \
+ fi
diff --git a/builtin/inventory/config.yaml b/builtin/inventory/config.yaml
index f7c3ee58f..f807cbef1 100644
--- a/builtin/inventory/config.yaml
+++ b/builtin/inventory/config.yaml
@@ -7,40 +7,40 @@ spec:
# kkzone: cn
# work_dir is the directory where the artifact is extracted.
# work_dir: /var/lib/kubekey/
- # cni binary
- cni_version: v1.2.0
+ # the version of kubernetes to be installed.
+ # should be greater than or equal to kube_version_min_required.
+ kube_version: v1.23.15
# helm binary
helm_version: v3.14.2
- # docker-compose binary
- dockercompose_version: v2.24.6
- # harbor image tag
- harbor_version: v2.10.1
- # registry image tag
- registry_version: 2.8.3
- # keepalived image tag
- keepalived_version: stable
- # runc binary
- runc_version: v1.1.11
+ # cni binary
+ cni_version: v1.2.0
# calicoctl binary
calico_version: v3.27.2
# etcd binary
etcd_version: v3.5.6
+ # harbor image tag
+# harbor_version: v2.10.1
+ # docker-compose binary
+# dockercompose_version: v2.24.6
+ # registry image tag
+# registry_version: 2.8.3
+ # keepalived image tag
+# keepalived_version: stable
# crictl binary
crictl_version: v1.29.0
+ # docker binary
+ docker_version: 24.0.6
# cilium helm
- cilium_version: 1.15.4
+# cilium_version: 1.15.4
# kubeovn helm
- kubeovn_version: 0.1.0
+# kubeovn_version: 0.1.0
# hybridnet helm
- hybridnet_version: 0.6.8
+# hybridnet_version: 0.6.8
# containerd binary
- containerd_version: v1.7.0
- # docker binary
- docker_version: 24.0.6
+# containerd_version: v1.7.0
+ # runc binary
+# runc_version: v1.1.11
# cridockerd
- cridockerd_version: v0.3.10
- # the version of kubernetes to be installed.
- # should be greater than or equal to kube_version_min_required.
- kube_version: v1.23.15
+# cridockerd_version: v0.3.10
# nfs provisioner helm version
- nfs_provisioner_version: 4.0.18
+# nfs_provisioner_version: 4.0.18
diff --git a/builtin/inventory/inventory.yaml b/builtin/inventory/inventory.yaml
index 58682b679..b7f723366 100644
--- a/builtin/inventory/inventory.yaml
+++ b/builtin/inventory/inventory.yaml
@@ -4,22 +4,13 @@ metadata:
name: default
spec:
hosts: # your can set all nodes here. or set nodes on special groups.
-# localhost: {} localhost is the default host.
# node1:
-# ssh_host: xxxxx
-# ssh_port: 22
-# ssh_user: user
-# ssh_password: password
-# node2:
-# ssh_host: xxxxx
-# ssh_port: 22
-# ssh_user: user
-# ssh_password: password
-# node3:
-# ssh_host: xxxxx
-# ssh_port: 22
-# ssh_user: user
-# ssh_password: password
+# connector:
+# type: ssh
+# host: node1
+# port: 22
+# user: root
+# password: 123456
groups:
# all kubernetes nodes.
k8s_cluster:
@@ -38,10 +29,10 @@ spec:
etcd:
hosts:
- localhost
- image_registry:
- hosts:
- - localhost
+# image_registry:
+# hosts:
+# - localhost
# nfs nodes for registry storage. and kubernetes nfs storage
- nfs:
- hosts:
- - localhost
+# nfs:
+# hosts:
+# - localhost
diff --git a/builtin/playbooks/artifact_export.yaml b/builtin/playbooks/artifact_export.yaml
index b480be0cd..b800287e4 100644
--- a/builtin/playbooks/artifact_export.yaml
+++ b/builtin/playbooks/artifact_export.yaml
@@ -5,8 +5,8 @@
tasks:
- name: Package image
image:
- pull: "{{ image_manifests }}"
- when: image_manifests|length > 0
+ pull: "{{ .image_manifests }}"
+ when: .image_manifests | default list | len | lt 0
- name: Export artifact
command: |
- cd {{ work_dir }} && tar -czvf kubekey-artifact.tar.gz kubekey/
+ cd {{ .work_dir }} && tar -czvf kubekey-artifact.tar.gz kubekey/
diff --git a/builtin/playbooks/certs_renew.yaml b/builtin/playbooks/certs_renew.yaml
index 8469ab377..599d64668 100644
--- a/builtin/playbooks/certs_renew.yaml
+++ b/builtin/playbooks/certs_renew.yaml
@@ -13,7 +13,7 @@
- vars/certs_renew.yaml
roles:
- role: certs/renew-etcd
- when: groups['etcd']|length > 0 && renew_etcd
+ when: and (.groups.etcd | default list | len | lt 0) .renew_etcd
- hosts:
- image_registry
@@ -22,7 +22,7 @@
- vars/certs_renew.yaml
roles:
- role: certs/renew-registry
- when: groups['image_registry']|length > 0 && renew_image_registry
+ when: and (.groups.image_registry | default list | len | lt 0) .renew_image_registry
- hosts:
- kube_control_plane
@@ -31,4 +31,4 @@
tags: ["certs"]
roles:
- role: certs/renew-kubernetes
- when: groups['kube_control_plane']|length > 0 && renew_kubernetes
+ when: and (.groups.kube_control_plane | default list | len | lt 0) .renew_kubernetes
diff --git a/builtin/playbooks/create_cluster.yaml b/builtin/playbooks/create_cluster.yaml
index 2a6473b8d..943e17875 100644
--- a/builtin/playbooks/create_cluster.yaml
+++ b/builtin/playbooks/create_cluster.yaml
@@ -37,7 +37,7 @@
- kube_control_plane
roles:
- role: install/certs
- when: renew_certs.enabled|default_if_none:false
+ when: .renew_certs.enabled
- hosts:
- k8s_cluster|random
diff --git a/builtin/playbooks/hook/post_install.yaml b/builtin/playbooks/hook/post_install.yaml
index e7789c81c..25d37123f 100644
--- a/builtin/playbooks/hook/post_install.yaml
+++ b/builtin/playbooks/hook/post_install.yaml
@@ -6,8 +6,11 @@
- name: Copy post install scripts to remote
ignore_errors: yes
copy:
- src: "{{ work_dir }}/scripts/post_install_{{ inventory_name }}.sh"
- dest: "/etc/kubekey/scripts/post_install_{{ inventory_name }}.sh"
+ src: |
+ {{ .work_dir }}/scripts/post_install_{{ .inventory_name }}.sh
+ dest: |
+ /etc/kubekey/scripts/post_install_{{ .inventory_name }}.sh
+ mode: 0755
- name: Execute post install scripts
command: |
for file in /etc/kubekey/scripts/post_install_*.sh; do
diff --git a/builtin/playbooks/hook/pre_install.yaml b/builtin/playbooks/hook/pre_install.yaml
index b9d951bb6..627f906c5 100644
--- a/builtin/playbooks/hook/pre_install.yaml
+++ b/builtin/playbooks/hook/pre_install.yaml
@@ -6,8 +6,11 @@
- name: Copy pre install scripts to remote
ignore_errors: yes
copy:
- src: "{{ work_dir }}/scripts/pre_install_{{ inventory_name }}.sh"
- dest: "/etc/kubekey/scripts/pre_install_{{ inventory_name }}.sh"
+ src: |
+ {{ .work_dir }}/scripts/pre_install_{{ .inventory_name }}.sh
+ dest: |
+ /etc/kubekey/scripts/pre_install_{{ .inventory_name }}.sh
+ mode: 0755
- name: Execute pre install scripts
command: |
for file in /etc/kubekey/scripts/pre_install_*.sh; do
diff --git a/builtin/playbooks/precheck.yaml b/builtin/playbooks/precheck.yaml
index 677a293e5..2cebd8110 100644
--- a/builtin/playbooks/precheck.yaml
+++ b/builtin/playbooks/precheck.yaml
@@ -3,7 +3,7 @@
- localhost
roles:
- role: precheck/artifact_check
- when: artifact_file | defined
+ when: and .artifact.artifact_file (ne .artifact.artifact_file "")
- hosts:
- k8s_cluster
diff --git a/builtin/playbooks/vars/create_cluster_kubernetes.yaml b/builtin/playbooks/vars/create_cluster_kubernetes.yaml
index 1d4304845..200a34d51 100644
--- a/builtin/playbooks/vars/create_cluster_kubernetes.yaml
+++ b/builtin/playbooks/vars/create_cluster_kubernetes.yaml
@@ -1,7 +1,27 @@
global_registry: ""
-dockerio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}docker.io{% endif %}"
-quayio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}quay.io{% endif %}"
-ghcrio_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}ghcr.io{% endif %}"
-k8s_registry: "{% if (global_registry != '') %}{{ global_registry }}{% else %}registry.k8s.io{% endif %}"
+dockerio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ docker.io
+ {{- end -}}
+quayio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ quay.io
+ {{- end -}}
+ghcrio_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ ghcr.io
+ {{- end -}}
+k8s_registry: |
+ {{- if ne .global_registry "" -}}
+ {{ .global_registry }}
+ {{- else -}}
+ registry.k8s.io
+ {{- end -}}
security_enhancement: false
diff --git a/builtin/roles/addons/cni/defaults/main.yaml b/builtin/roles/addons/cni/defaults/main.yaml
index b28f48a09..21497f1db 100644
--- a/builtin/roles/addons/cni/defaults/main.yaml
+++ b/builtin/roles/addons/cni/defaults/main.yaml
@@ -1,21 +1,45 @@
cni:
- kube_proxy: "{{ kubernetes.kube_proxy.enabled|default_if_none:true }}"
+ kube_proxy: |
+ {{- .kubernetes.kube_proxy.enabled | default true -}}
# apiVersion for policy may be changed for difference kubernetes version. https://kube-api.ninja
- api_version_policy: "{%if (kube_version|version:'1) %}true{% else %}false{% endif %}"
- kube_pods_v4_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|first }}"
- kube_pods_v6_cidr: "{{ kubernetes.networking.pod_cidr|default_if_none:'10.233.64.0/18'|split:','|last }}"
- node_cidr_mask_size: "{{ kubernetes.controller_manager.kube_network_node_prefix|default_if_none:24 }}"
- kube_svc_cidr: "{{ kubernetes.networking.service_cidr|default_if_none:'10.233.0.0/18' }}"
+ ipv6_support: |
+ {{- if gt ( .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | len) 1 -}}
+ true
+ {{- else -}}
+ false
+ {{- end -}}
+ kube_pods_v4_cidr: |
+ {{- .kubernetes.networking.pod_cidr | default "10.233.64.0/18" | splitList "," | first -}}
+ kube_pods_v6_cidr: |
+ {{- if .cni.ipv6_support -}}
+ {{- .kubernetes.networking.pod_cidr | splitList "," | last -}}
+ {{- end -}}
+ node_cidr_mask_size: |
+ {{- .kubernetes.controller_manager.kube_network_node_prefix | default 24 -}}
+ kube_svc_cidr: |
+ {{- .kubernetes.networking.service_cidr | default "10.233.0.0/18" -}}
multus:
enabled: false
- image: "{{ dockerio_registry }}/kubesphere/multus-cni:v3.8"
+ image: |
+ {{ .dockerio_registry }}/kubesphere/multus-cni:v3.8
calico:
# when cluster node > 50. it default true.
- typha: "{%if (groups['k8s_cluster']|length > 50) %}true{% else %}false{% endif %}"
+ typha: |
+ {{- if gt (.groups.k8s_cluster | default list | len) 50 -}}
+ true
+ {{- else -}}
+ false
+ {{- end -}}
veth_mtu: 0
ipip_mode: Always
vxlan_mode: Never
@@ -24,38 +48,65 @@ cni:
# true is enabled
default_ip_pool: true
# image
- cni_image: "{{ dockerio_registry }}/calico/cni:{{ calico_version }}"
- node_image: "{{ dockerio_registry }}/calico/node:{{ calico_version }}"
- kube_controller_image: "{{ dockerio_registry }}/calico/kube-controllers:{{ calico_version }}"
- typha_image: "{{ dockerio_registry }}/calico/typha:{{ calico_version }}"
+ cni_image: |
+ {{ .dockerio_registry }}/calico/cni:{{ .calico_version }}
+ node_image: |
+ {{ .dockerio_registry }}/calico/node:{{ .calico_version }}
+ kube_controller_image: |
+ {{ .dockerio_registry }}/calico/kube-controllers:{{ .calico_version }}
+ typha_image: |
+ {{ .dockerio_registry }}/calico/typha:{{ .calico_version }}
replicas: 1
node_selector: {}
flannel:
# https://github.com/flannel-io/flannel/blob/master/Documentation/backends.md
backend: vxlan
- cni_plugin_image: "{{ dockerio_registry }}/flannel/flannel-cni-plugin:v1.4.0-flannel1"
- flannel_image: "{{ dockerio_registry }}/flannel/flannel:{{ flannel_version }}"
+ cni_plugin_image: |
+ {{ .dockerio_registry }}/flannel/flannel-cni-plugin:v1.4.0-flannel1
+ flannel_image: |
+ {{ .dockerio_registry }}/flannel/flannel:{{ .flannel_version }}
cilium:
# image repo
- cilium_repository: "{{ quayio_registry }}/cilium/cilium"
- certgen_repository: "{{ quayio_registry }}/cilium/certgen"
- hubble_relay_repository: "{{ quayio_registry }}/cilium/hubble-relay"
- hubble_ui_backend_repository: "{{ quayio_registry }}/cilium/hubble-ui-backend"
- hubble_ui_repository: "{{ quayio_registry }}/cilium/hubble-ui"
- cilium_envoy_repository: "{{ quayio_registry }}/cilium/cilium-envoy"
- cilium_etcd_operator_repository: "{{ quayio_registry }}/cilium/cilium-etcd-operator"
- operator_repository: "{{ quayio_registry }}/cilium/operator"
- startup_script_repository: "{{ quayio_registry }}/cilium/startup-script"
- clustermesh_apiserver_repository: "{{ quayio_registry }}/cilium/clustermesh-apiserver"
- busybox_repository: "{{ dockerio_registry }}/library/busybox"
- spire_agent_repository: "{{ ghcrio_registry }}/spiffe/spire-agent"
- spire_server_repository: "{{ ghcrio_registry }}/spiffe/spire-server"
- k8s_endpoint: "{% if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ groups['kube_control_plane']|first }}{% endif %}"
- k8s_port: "{{ kubernetes.apiserver.port|default_if_none:6443 }}"
+ cilium_repository: |
+ {{ .quayio_registry }}/cilium/cilium
+ certgen_repository: |
+ {{ .quayio_registry }}/cilium/certgen
+ hubble_relay_repository: |
+ {{ .quayio_registry }}/cilium/hubble-relay
+ hubble_ui_backend_repository: |
+ {{ .quayio_registry }}/cilium/hubble-ui-backend
+ hubble_ui_repository: |
+ {{ .quayio_registry }}/cilium/hubble-ui
+ cilium_envoy_repository: |
+ {{ .quayio_registry }}/cilium/cilium-envoy
+ cilium_etcd_operator_repository: |
+ {{ .quayio_registry }}/cilium/cilium-etcd-operator
+ operator_repository: |
+ {{ .quayio_registry }}/cilium/operator
+ startup_script_repository: |
+ {{ .quayio_registry }}/cilium/startup-script
+ clustermesh_apiserver_repository: |
+ {{ .quayio_registry }}/cilium/clustermesh-apiserver
+ busybox_repository: |
+ {{ .dockerio_registry }}/library/busybox
+ spire_agent_repository: |
+ {{ .ghcrio_registry }}/spiffe/spire-agent
+ spire_server_repository: |
+ {{ .ghcrio_registry }}/spiffe/spire-server
+ k8s_endpoint: |
+ {{- if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") -}}
+ {{- .kubernetes.control_plane_endpoint -}}
+ {{- else -}}
+ {{- .groups.kube_control_plane | default list | first -}}
+ {{- end -}}
+ k8s_port: |
+ {{- .kubernetes.apiserver.port | default 6443 -}}
kubeovn:
replica: 1
- registry: "{{ dockerio_registry }}/kubeovn"
+ registry: |
+ {{ .dockerio_registry }}/kubeovn
hybridnet:
- registry: "{{ dockerio_registry }}"
+ registry: |
+ {{- .dockerio_registry -}}
# hybridnet_image: hybridnetdev/hybridnet
# hybridnet_tag: v0.8.8
diff --git a/builtin/roles/addons/cni/tasks/calico.yaml b/builtin/roles/addons/cni/tasks/calico.yaml
index 12e4e59f2..2b0826915 100644
--- a/builtin/roles/addons/cni/tasks/calico.yaml
+++ b/builtin/roles/addons/cni/tasks/calico.yaml
@@ -1,9 +1,11 @@
---
- name: Generate calico manifest
template:
- src: "calico/{{ calico_version|split:'.'|slice:':2'|join:'.' }}.yaml"
- dest: "/etc/kubernetes/cni/calico-{{ calico_version }}.yaml"
+ src: |
+ calico/{{ slice (.calico_version | splitList ".") 0 2 | join "." }}.yaml
+ dest: |
+ /etc/kubernetes/cni/calico-{{ .calico_version }}.yaml
- name: Apply calico
command: |
- /usr/local/bin/kubectl apply -f /etc/kubernetes/cni/calico-{{ calico_version }}.yaml --force
+ kubectl apply -f /etc/kubernetes/cni/calico-{{ .calico_version }}.yaml --force
diff --git a/builtin/roles/addons/cni/tasks/cilium.yaml b/builtin/roles/addons/cni/tasks/cilium.yaml
index 5440ee5db..dd0ce3e0a 100644
--- a/builtin/roles/addons/cni/tasks/cilium.yaml
+++ b/builtin/roles/addons/cni/tasks/cilium.yaml
@@ -1,33 +1,35 @@
---
- name: Sync cilium helm chart to remote
copy:
- src: "{{ work_dir }}/kubekey/cni/cilium-{{ cilium_version }}.tgz"
- dest: "/etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/cni/cilium-{{ .cilium_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz
# https://docs.cilium.io/en/stable/installation/k8s-install-helm/
- name: Install cilium
command: |
- helm install cilium /etc/kubernetes/cni/cilium-{{ cilium_version }}.tgz --namespace kube-system \
- --set image.repository={{ cilium_repository }} \
- --set preflight.image.repository={{ cilium_repository }} \
- --set certgen.image.repository={{ certgen_repository }} \
- --set hubble.relay.image.repository={{ hubble_relay_repository }} \
- --set hubble.ui.backend.image.repository={{ hubble_ui_backend_repository }} \
- --set hubble.ui.frontend.image.repository={{ hubble_ui_repository }} \
- --set envoy.image.repository={{ cilium_envoy_repository }} \
- --set etcd.image.repository={{ cilium_etcd_operator_repository }} \
- --set operator.image.repository={{ operator_repository }} \
- --set nodeinit.image.repository={{ startup_script_repository }} \
- --set clustermesh.apiserver.image.repository={{ clustermesh_apiserver_repository }} \
- --set authentication.mutual.spire.install.initImage.image.repository={{ busybox_repository }} \
- --set authentication.mutual.spire.install.agent.image.repository={{ spire_agent_repository }} \
- --set authentication.mutual.spire.install.server.image.repository={{ spire_server_repository }} \
- --set operator.replicas={{ cni.cilium.operator_replicas }} \
- --set ipv6.enabled={{ cni.ipv6_support }} \
- --set ipv4NativeRoutingCIDR: {{ cni.kube_pods_v4_cidr }} \
- {% if (cni.ipv6_support) %}
- --set ipv6NativeRoutingCIDR: {{ cni.kube_pods_v6_cidr }} \
- {% endif %}
- {% if (cni.kube_proxy) %}
- --set kubeProxyReplacement=strict --set k8sServiceHost={{ cni.cilium.k8s_endpoint }} --set k8sServicePort={{ cni.cilium.k8s_port }}
- {% endif %}
+ helm install cilium /etc/kubernetes/cni/cilium-{{ .cilium_version }}.tgz --namespace kube-system \
+ --set image.repository={{ .cni.cilium.cilium_repository }} \
+ --set preflight.image.repository={{ .cni.cilium.cilium_repository }} \
+ --set certgen.image.repository={{ .cni.cilium.certgen_repository }} \
+ --set hubble.relay.image.repository={{ .cni.cilium.hubble_relay_repository }} \
+ --set hubble.ui.backend.image.repository={{ .cni.cilium.hubble_ui_backend_repository }} \
+ --set hubble.ui.frontend.image.repository={{ .cni.cilium.hubble_ui_repository }} \
+ --set envoy.image.repository={{ .cni.cilium.cilium_envoy_repository }} \
+ --set etcd.image.repository={{ .cni.cilium.cilium_etcd_operator_repository }} \
+ --set operator.image.repository={{ .cni.cilium.operator_repository }} \
+ --set nodeinit.image.repository={{ .cni.cilium.startup_script_repository }} \
+ --set clustermesh.apiserver.image.repository={{ .cni.cilium.clustermesh_apiserver_repository }} \
+ --set authentication.mutual.spire.install.initImage.image.repository={{ .cni.cilium.busybox_repository }} \
+ --set authentication.mutual.spire.install.agent.image.repository={{ .cni.cilium.spire_agent_repository }} \
+ --set authentication.mutual.spire.install.server.image.repository={{ .cni.cilium.spire_server_repository }} \
+ --set operator.replicas={{ .cni.cilium.operator_replicas }} \
+ --set ipv6.enabled={{ .cni.ipv6_support }} \
+ --set ipv4NativeRoutingCIDR={{ .cni.kube_pods_v4_cidr }} \
+ {{- if .cni.ipv6_support -}}
+ --set ipv6NativeRoutingCIDR={{ .cni.kube_pods_v6_cidr }} \
+ {{- end -}}
+ {{- if .cni.kube_proxy -}}
+ --set kubeProxyReplacement=strict --set k8sServiceHost={{ .cni.cilium.k8s_endpoint }} --set k8sServicePort={{ .cni.cilium.k8s_port }}
+ {{- end -}}
diff --git a/builtin/roles/addons/cni/tasks/flannel.yaml b/builtin/roles/addons/cni/tasks/flannel.yaml
index 7832f4e25..e51b180ff 100644
--- a/builtin/roles/addons/cni/tasks/flannel.yaml
+++ b/builtin/roles/addons/cni/tasks/flannel.yaml
@@ -2,9 +2,10 @@
# https://github.com/flannel-io/flannel/blob/master/Documentation/kubernetes.md
- name: Generate flannel manifest
template:
- src: "flannel/flannel.yaml"
- dest: "/etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml"
+ src: flannel/flannel.yaml
+ dest: |
+ /etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml
- name: Apply calico
command: |
- /usr/local/bin/kubectl apply -f /etc/kubernetes/cni/flannel-{{ flannel_version }}.yaml
+ kubectl apply -f /etc/kubernetes/cni/flannel-{{ .flannel_version }}.yaml
diff --git a/builtin/roles/addons/cni/tasks/hybridnet.yaml b/builtin/roles/addons/cni/tasks/hybridnet.yaml
index 9cdd95fdc..9eba898dc 100644
--- a/builtin/roles/addons/cni/tasks/hybridnet.yaml
+++ b/builtin/roles/addons/cni/tasks/hybridnet.yaml
@@ -1,17 +1,19 @@
---
- name: Sync hybridnet helm chart to remote
copy:
- src: "{{ work_dir }}/kubekey/cni/hybridnet-{{ hybridnet_version }}.tgz"
- dest: "/etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/cni/hybridnet-{{ .hybridnet_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz
# https://artifacthub.io/packages/helm/hybridnet/hybridnet
- name: Install hybridnet
command: |
- helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ hybridnet_version }}.tgz --namespace kube-system \
- {% if (cni.hybridnet.hybridnet_image|defined && cni.hybridnet.hybridnet_image != '') %}
- --set images.hybridnet.image={{ cni.hybridnet.hybridnet_image }} \
- {% endif %}
- {% if (cni.hybridnet.hybridnet_tag|defined && cni.hybridnet.hybridnet_tag != '') %}
- --set images.hybridnet.tag={{ cni.hybridnet.hybridnet_tag }} \
- {% endif %}
- --set image.registryURL={{ cni.hybridnet.registry }} \
+ helm install hybridnet /etc/kubernetes/cni/hybridnet-{{ .hybridnet_version }}.tgz --namespace kube-system \
+ {{- if ne .cni.hybridnet.hybridnet_image "" -}}
+ --set images.hybridnet.image={{ .cni.hybridnet.hybridnet_image }} \
+ {{- end -}}
+ {{- if ne .cni.hybridnet.hybridnet_tag "" -}}
+ --set images.hybridnet.tag={{ .cni.hybridnet.hybridnet_tag }} \
+ {{- end -}}
+ --set image.registryURL={{ .cni.hybridnet.registry }} \
diff --git a/builtin/roles/addons/cni/tasks/kubeovn.yaml b/builtin/roles/addons/cni/tasks/kubeovn.yaml
index 8e81955ab..deac54ca9 100644
--- a/builtin/roles/addons/cni/tasks/kubeovn.yaml
+++ b/builtin/roles/addons/cni/tasks/kubeovn.yaml
@@ -7,18 +7,24 @@
# kubeovn-0.1.0.tgz is helm version not helm appVersion
- name: Sync kubeovn helm chart to remote
copy:
- src: "{{ work_dir }}/kubekey/cni/kubeovn-{{ kubeovn_version }}.tgz"
- dest: "/etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/cni/kubeovn-{{ .kubeovn_version }}.tgz
+ dest: |
+ /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz
# https://kubeovn.github.io/docs/stable/start/one-step-install/#helm-chart
- name: Install kubeovn
command: |
- helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ kubeovn_version }}.tgz --set replicaCount={{ cni.kubeovn.replica }} \
- --set MASTER_NODES={% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %} \
- --set global.registry.address={{ cni.kubeovn.registry }} \
- --set ipv4.POD_CIDR={{ cni.kubeovn.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ cni.kubeovn.kube_svc_cidr }} \
- {% if (cni.ipv6_support) %}
+ helm install kubeovn /etc/kubernetes/cni/kubeovn-{{ .kubeovn_version }}.tgz --set replicaCount={{ .cni.kubeovn.replica }} \
+ {{ $ips := list }}
+ {{- range .groups.kube_control_plane | default list -}}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
+ {{- end -}}
+ --set MASTER_NODES={{ $ips |join "," }} \
+ --set global.registry.address={{ .cni.kubeovn.registry }} \
+ --set ipv4.POD_CIDR={{ .cni.kube_pods_v4_cidr }} --set ipv4.SVC_CIDR={{ .cni.kube_svc_cidr }} \
+ {{- if .cni.ipv6_support -}}
--set networking.NET_STACK=dual_stack \
- --set dual_stack.POD_CIDR={{ cni.kubeovn.kube_pods_v4_cidr }},{{ cni.kubeovn.kube_pods_v6_cidr }} \
- --set dual_stack.SVC_CIDR={{ cni.kubeovn.kube_svc_cidr }} \
- {% endif %}
+ --set dual_stack.POD_CIDR={{ .cni.kube_pods_v4_cidr }},{{ .cni.kube_pods_v6_cidr }} \
+ --set dual_stack.SVC_CIDR={{ .cni.kube_svc_cidr }} \
+ {{- end -}}
diff --git a/builtin/roles/addons/cni/tasks/main.yaml b/builtin/roles/addons/cni/tasks/main.yaml
index 2d7eb84c2..9c3be2782 100644
--- a/builtin/roles/addons/cni/tasks/main.yaml
+++ b/builtin/roles/addons/cni/tasks/main.yaml
@@ -1,18 +1,18 @@
---
- include_tasks: calico.yaml
- when: cni.kube_network_plugin == "calico"
+ when: .cni.kube_network_plugin | eq "calico"
- include_tasks: flannel.yaml
- when: cni.kube_network_plugin == "flannel"
+ when: .cni.kube_network_plugin | eq "flannel"
- include_tasks: cilium.yaml
- when: cni.kube_network_plugin == "cilium"
+ when: .cni.kube_network_plugin | eq "cilium"
- include_tasks: kubeovn.yaml
- when: cni.kube_network_plugin == "kubeovn"
+ when: .cni.kube_network_plugin | eq "kubeovn"
- include_tasks: hybridnet.yaml
- when: cni.kube_network_plugin == "hyvbridnet"
+ when: .cni.kube_network_plugin | eq "hybridnet"
- include_tasks: multus.yaml
- when: cni.multus.enabled
+ when: .cni.multus.enabled
diff --git a/builtin/roles/addons/cni/templates/calico/pdg.yaml b/builtin/roles/addons/cni/templates/calico/pdg.yaml
index e2eb7e526..51093eb87 100644
--- a/builtin/roles/addons/cni/templates/calico/pdg.yaml
+++ b/builtin/roles/addons/cni/templates/calico/pdg.yaml
@@ -2,7 +2,7 @@
# Source: calico/templates/calico-kube-controllers.yaml
# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict
-apiVersion: {{ cni.api_version_policy }}
+apiVersion: {{ .cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-kube-controllers
@@ -15,12 +15,12 @@ spec:
matchLabels:
k8s-app: calico-kube-controllers
-{% if (cni.calico.typha) %}
+{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
-apiVersion: {{ cni.api_version_policy }}
+apiVersion: {{ .cni.api_version_policy }}
kind: PodDisruptionBudget
metadata:
name: calico-typha
@@ -32,4 +32,4 @@ spec:
selector:
matchLabels:
k8s-app: calico-typha
-{% endif %}
+{{- end }}
diff --git a/builtin/roles/addons/cni/templates/calico/v3.27.yaml b/builtin/roles/addons/cni/templates/calico/v3.27.yaml
index 999dea797..e6709565e 100644
--- a/builtin/roles/addons/cni/templates/calico/v3.27.yaml
+++ b/builtin/roles/addons/cni/templates/calico/v3.27.yaml
@@ -29,14 +29,14 @@ metadata:
namespace: kube-system
data:
# You must set a non-zero value for Typha replicas below.
- typha_service_name: {% if (cni.calico.typha) %}"calico-typha"{% else %}"none"{% endif %}
+ typha_service_name: "{{ if .cni.calico.typha }}calico-typha{{ else }}none{{ end }}"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use for workload interfaces and tunnels.
# By default, MTU is auto-detected, and explicitly setting this field should not be required.
# You can override auto-detection by providing a non-zero value.
- veth_mtu: "{{ cni.calico.veth_mtu }}"
+ veth_mtu: "{{ .cni.calico.veth_mtu }}"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
@@ -4715,7 +4715,7 @@ subjects:
name: calico-cni-plugin
namespace: kube-system
-{% if (cni.calico.typha) %}
+{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
@@ -4736,8 +4736,7 @@ spec:
name: calico-typha
selector:
k8s-app: calico-typha
-
-{% endif %}
+{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
@@ -4785,7 +4784,7 @@ spec:
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
- image: {{ cni.calico.cni_image }}
+ image: {{ .cni.calico.cni_image }}
imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
envFrom:
@@ -4813,7 +4812,7 @@ spec:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
- image: {{ cni.calico.cni_image }}
+ image: {{ .cni.calico.cni_image }}
imagePullPolicy: IfNotPresent
command: ["/opt/cni/bin/install"]
envFrom:
@@ -4856,7 +4855,7 @@ spec:
# i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed
# in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode.
- name: "mount-bpffs"
- image: {{ cni.calico.node_image }}
+ image: {{ .cni.calico.node_image }}
imagePullPolicy: IfNotPresent
command: ["calico-node", "-init", "-best-effort"]
volumeMounts:
@@ -4882,7 +4881,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
- image: {{ cni.calico.node_image }}
+ image: {{ .cni.calico.node_image }}
imagePullPolicy: IfNotPresent
envFrom:
- configMapRef:
@@ -4893,14 +4892,14 @@ spec:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
- {% if (cni.calico.typha) %}
+{{- if .cni.calico.typha }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
- {% endif %}
+{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
@@ -4927,36 +4926,36 @@ spec:
value: "can-reach=$(NODEIP)"
- name: IP
value: "autodetect"
- {% if (cni.ipv6_support) %}
+{{- if .cni.ipv6_support }}
- name: IP6
value: "autodetect"
- {% endif %}
+{{- end }}
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
- value: "{{ cni.calico.ipip_mode }}"
+ value: "{{ .cni.calico.ipip_mode }}"
# Enable or Disable VXLAN on the default IP pool.
- name: CALICO_IPV4POOL_VXLAN
- value: "{{ cni.calico.vxlan_mode }}"
- {% if (cni.calico.ipv4pool_nat_outgoing) %}
+ value: "{{ .cni.calico.vxlan_mode }}"
+{{- if .cni.calico.ipv4pool_nat_outgoing }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "true"
- {% else %}
+{{- else }}
- name: CALICO_IPV4POOL_NAT_OUTGOING
value: "false"
- {% endif %}
- {% if (cni.ipv6_support) %}
+{{- end }}
+{{- if .cni.ipv6_support }}
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Always"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "true"
- {% else %}
+{{- else }}
# Enable or Disable VXLAN on the default IPv6 IP pool.
- name: CALICO_IPV6POOL_VXLAN
value: "Never"
- name: CALICO_IPV6POOL_NAT_OUTGOING
value: "false"
- {% endif %}
+{{- end }}
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
@@ -4975,43 +4974,43 @@ spec:
configMapKeyRef:
name: calico-config
key: veth_mtu
- {% if cni.calico.default_ip_pool %}
+{{- if .cni.calico.default_ip_pool }}
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect.
- name: CALICO_IPV4POOL_CIDR
- value: "{{ cni.kube_pods_v4_cidr }}"
+ value: "{{ .cni.kube_pods_v4_cidr }}"
- name: CALICO_IPV4POOL_BLOCK_SIZE
- value: "{{ cni.node_cidr_mask_size }}"
- {% if (cni.ipv6_support) %}
+ value: "{{ .cni.node_cidr_mask_size }}"
+ {{- if .cni.ipv6_support }}
- name: CALICO_IPV6POOL_CIDR
- value: "{{ cni.kube_pods_v6_cidr }}"
+ value: "{{ .cni.kube_pods_v6_cidr }}"
- name: CALICO_IPV6POOL_BLOCK_SIZE
value: "120"
- {% endif %}
- {% else %}
+ {{- end }}
+{{- else }}
- name: NO_DEFAULT_POOLS
value: "true"
- name: CALICO_IPV4POOL_CIDR
value: ""
- {% if (cni.ipv6_support) %}
+ {{- if .cni.ipv6_support }}
- name: CALICO_IPV6POOL_CIDR
value: ""
- {% endif %}
- {% endif %}
+ {{- end }}
+{{- end }}
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- {% if (cni.ipv6_support) %}
+{{- if .cni.ipv6_support }}
- name: FELIX_IPV6SUPPORT
value: "true"
- {% else %}
+{{- else }}
- name: FELIX_IPV6SUPPORT
value: "false"
- {% endif %}
+{{- end }}
- name: FELIX_HEALTHENABLED
value: "true"
- name: FELIX_DEVICEROUTESOURCEADDRESS
@@ -5135,7 +5134,7 @@ metadata:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
- replicas: {{ cni.calico.replicas }}
+ replicas: {{ .cni.calico.replicas }}
selector:
matchLabels:
k8s-app: calico-kube-controllers
@@ -5150,7 +5149,7 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
-{{ cni.calico.node_selector|to_yaml:8|safe }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
@@ -5175,7 +5174,7 @@ spec:
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
- image: {{ cni.calico.kube_controller_image }}
+ image: {{ .cni.calico.kube_controller_image }}
imagePullPolicy: IfNotPresent
env:
# Choose which controllers to run.
@@ -5199,7 +5198,7 @@ spec:
- -r
periodSeconds: 10
-{% if (cni.calico.typha) %}
+{{- if .cni.calico.typha }}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Deployment of Typha to back the above service.
@@ -5218,7 +5217,7 @@ spec:
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
- replicas: {{ cni.calico.replicas }}
+ replicas: {{ .cni.calico.replicas }}
revisionHistoryLimit: 2
selector:
matchLabels:
@@ -5245,7 +5244,7 @@ spec:
spec:
nodeSelector:
kubernetes.io/os: linux
-{{ cni.calico.node_selector|to_yaml:8|safe }}
+{{ .cni.calico.node_selector | toYaml | indent 8 }}
hostNetwork: true
# Typha supports graceful shut down, disconnecting clients slowly during the grace period.
# The TYPHA_SHUTDOWNTIMEOUTSECS env var should be kept in sync with this value.
@@ -5279,7 +5278,7 @@ spec:
securityContext:
fsGroup: 65534
containers:
- - image: {{ cni.calico.typha_image }}
+ - image: {{ .cni.calico.typha_image }}
imagePullPolicy: IfNotPresent
name: calico-typha
ports:
@@ -5336,4 +5335,4 @@ spec:
host: localhost
periodSeconds: 10
timeoutSeconds: 10
-{% endif %}
+{{- end }}
diff --git a/builtin/roles/addons/cni/templates/flannel/flannel.yaml b/builtin/roles/addons/cni/templates/flannel/flannel.yaml
index eb3127ece..814be8428 100644
--- a/builtin/roles/addons/cni/templates/flannel/flannel.yaml
+++ b/builtin/roles/addons/cni/templates/flannel/flannel.yaml
@@ -90,14 +90,14 @@ data:
}
net-conf.json: |
{
- "Network": "{{ cni.kube_pods_v4_cidr }}",
-{% if (cni.ipv6_support) %}
+ "Network": "{{ .cni.kube_pods_v4_cidr }}",
+ {{- if .cni.ipv6_support }}
"EnableIPv6": true,
- "IPv6Network":"{{ cni.kube_pods_v6_cidr }}",
-{% endif %}
- "EnableNFTables": {{ cni.kube_proxy }},
+ "IPv6Network":"{{ .cni.kube_pods_v6_cidr }}",
+ {{- end }}
+ "EnableNFTables": {{ .cni.kube_proxy }},
"Backend": {
- "Type": "{{ cni.flannel.backend }}"
+ "Type": "{{ .cni.flannel.backend }}"
}
}
---
@@ -137,7 +137,7 @@ spec:
serviceAccountName: flannel
initContainers:
- name: install-cni-plugin
- image: {{ cni.flannel.cni_plugin_image }}
+ image: {{ .cni.flannel.cni_plugin_image }}
command:
- cp
args:
@@ -148,7 +148,7 @@ spec:
- name: cni-plugin
mountPath: /opt/cni/bin
- name: install-cni
- image: {{ cni.flannel.flannel_image }}
+ image: {{ .cni.flannel.flannel_image }}
command:
- cp
args:
@@ -162,7 +162,7 @@ spec:
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
- image: {{ cni.flannel.flannel_image }}
+ image: {{ .cni.flannel.flannel_image }}
command:
- /opt/bin/flanneld
args:
diff --git a/builtin/roles/addons/cni/templates/multus/multus.yaml b/builtin/roles/addons/cni/templates/multus/multus.yaml
index 34175f5d5..913487341 100644
--- a/builtin/roles/addons/cni/templates/multus/multus.yaml
+++ b/builtin/roles/addons/cni/templates/multus/multus.yaml
@@ -169,7 +169,7 @@ spec:
serviceAccountName: multus
containers:
- name: kube-multus
- image: {{ cni.multus.image }}
+ image: {{ .cni.multus.image }}
command: ["/entrypoint.sh"]
args:
- "--multus-conf-file=auto"
diff --git a/builtin/roles/addons/kata/defaults/main.yaml b/builtin/roles/addons/kata/defaults/main.yaml
index 7fd7bdbc1..5b158dd42 100644
--- a/builtin/roles/addons/kata/defaults/main.yaml
+++ b/builtin/roles/addons/kata/defaults/main.yaml
@@ -1,3 +1,4 @@
kata:
enabled: false
- image: kubesphere/kata-deploy:stable
+ image: |
+ {{ .dockerio_registry }}/kubesphere/kata-deploy:stable
diff --git a/builtin/roles/addons/kata/tasks/main.yaml b/builtin/roles/addons/kata/tasks/main.yaml
index f5e662d4b..fa6690068 100644
--- a/builtin/roles/addons/kata/tasks/main.yaml
+++ b/builtin/roles/addons/kata/tasks/main.yaml
@@ -1,11 +1,11 @@
---
- name: Generate kata deploy file
template:
- src: "kata-deploy.yaml"
- dest: "/etc/kubernetes/addons/kata-deploy.yaml"
- when: kata.enabled
+ src: kata-deploy.yaml
+ dest: /etc/kubernetes/addons/kata-deploy.yaml
+ when: .kata.enabled
- name: Deploy kata
command: |
kubectl apply -f /etc/kubernetes/addons/kata-deploy.yaml
- when: kata.enabled
+ when: .kata.enabled
diff --git a/builtin/roles/addons/kata/templates/kata-deploy.yaml b/builtin/roles/addons/kata/templates/kata-deploy.yaml
index 08bbde33d..f2afd42cd 100644
--- a/builtin/roles/addons/kata/templates/kata-deploy.yaml
+++ b/builtin/roles/addons/kata/templates/kata-deploy.yaml
@@ -44,7 +44,7 @@ spec:
serviceAccountName: kata-label-node
containers:
- name: kube-kata
- image: {{ kata.image }}
+ image: {{ .kata.image }}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
diff --git a/builtin/roles/addons/nfd/defaults/main.yaml b/builtin/roles/addons/nfd/defaults/main.yaml
index 6dcf469dd..aebb39547 100644
--- a/builtin/roles/addons/nfd/defaults/main.yaml
+++ b/builtin/roles/addons/nfd/defaults/main.yaml
@@ -1,3 +1,4 @@
nfd:
enabled: false
- image: kubesphere/node-feature-discovery:v0.10.0
+ image: |
+ {{ .dockerio_registry }}/kubesphere/node-feature-discovery:v0.10.0
diff --git a/builtin/roles/addons/nfd/tasks/main.yaml b/builtin/roles/addons/nfd/tasks/main.yaml
index 6b6a7a400..d472d9899 100644
--- a/builtin/roles/addons/nfd/tasks/main.yaml
+++ b/builtin/roles/addons/nfd/tasks/main.yaml
@@ -1,11 +1,11 @@
---
- name: Generate nfd deploy file
template:
- src: "nfd-deploy.yaml"
- dest: "/etc/kubernetes/addons/nfd-deploy.yaml"
- when: nfd.enabled
+ src: nfd-deploy.yaml
+ dest: /etc/kubernetes/addons/nfd-deploy.yaml
+ when: .nfd.enabled
- name: Deploy nfd
command: |
kubectl apply -f /etc/kubernetes/addons/nfd-deploy.yaml
- when: nfd.enabled
+ when: .nfd.enabled
diff --git a/builtin/roles/addons/nfd/templates/nfd-deploy.yaml b/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
index f47d20e58..189c28baa 100644
--- a/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
+++ b/builtin/roles/addons/nfd/templates/nfd-deploy.yaml
@@ -9,7 +9,6 @@ kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
name: nodefeaturerules.nfd.k8s-sigs.io
spec:
group: nfd.k8s-sigs.io
@@ -500,7 +499,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- image: {{ nfd.image }}
+ image: {{ .nfd.image }}
imagePullPolicy: IfNotPresent
livenessProbe:
exec:
@@ -564,7 +563,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- image: {{ nfd.image }}
+ image: {{ .nfd.image }}
imagePullPolicy: IfNotPresent
name: nfd-worker
securityContext:
diff --git a/builtin/roles/addons/sc/defaults/main.yaml b/builtin/roles/addons/sc/defaults/main.yaml
index e0a3eaf26..8c9a3eb28 100644
--- a/builtin/roles/addons/sc/defaults/main.yaml
+++ b/builtin/roles/addons/sc/defaults/main.yaml
@@ -2,11 +2,14 @@ sc:
local:
enabled: true
default: true
- provisioner_image: openebs/provisioner-localpv:3.3.0
- linux_utils_image: openebs/linux-utils:3.3.0
+ provisioner_image: |
+ {{ .dockerio_registry }}/openebs/provisioner-localpv:3.3.0
+ linux_utils_image: |
+ {{ .dockerio_registry }}/openebs/linux-utils:3.3.0
path: /var/openebs/local
nfs: # each k8s_cluster node should install nfs-utils
enabled: false
default: false
- server: "{{ groups['nfs']|first }}"
+ server: |
+ {{ groups.nfs | first }}
path: /share/kubernetes
diff --git a/builtin/roles/addons/sc/tasks/local.yaml b/builtin/roles/addons/sc/tasks/local.yaml
index d535322e6..4b09e706d 100644
--- a/builtin/roles/addons/sc/tasks/local.yaml
+++ b/builtin/roles/addons/sc/tasks/local.yaml
@@ -1,9 +1,9 @@
---
- name: Generate local manifest
template:
- src: "local-volume.yaml"
- dest: "/etc/kubernetes/addons/local-volume.yaml"
+ src: local-volume.yaml
+ dest: /etc/kubernetes/addons/local-volume.yaml
- name: deploy local
command: |
- /usr/local/bin/kubectl apply -f /etc/kubernetes/addons/local-volume.yaml
+ kubectl apply -f /etc/kubernetes/addons/local-volume.yaml
diff --git a/builtin/roles/addons/sc/tasks/main.yaml b/builtin/roles/addons/sc/tasks/main.yaml
index 18b3d58eb..59bd771d4 100644
--- a/builtin/roles/addons/sc/tasks/main.yaml
+++ b/builtin/roles/addons/sc/tasks/main.yaml
@@ -1,6 +1,6 @@
---
- include_tasks: local.yaml
- when: sc.local.enabled
+ when: .sc.local.enabled
- include_tasks: nfs.yaml
- when: sc.nfs.enabled
+ when: .sc.nfs.enabled
diff --git a/builtin/roles/addons/sc/tasks/nfs.yaml b/builtin/roles/addons/sc/tasks/nfs.yaml
index a578d2b96..53dfe317e 100644
--- a/builtin/roles/addons/sc/tasks/nfs.yaml
+++ b/builtin/roles/addons/sc/tasks/nfs.yaml
@@ -1,11 +1,13 @@
---
- name: Sync nfs provisioner helm to remote
copy:
- src: "{{ work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
- dest: "/etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/sc/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
+ dest: |
+ /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
- name: Deploy nfs provisioner
command: |
- helm install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz --namespace kube-system \
- --set nfs.server={{ sc.nfs.server }} --set nfs.path={{ sc.nfs.path }} \
- --set storageClass.defaultClass={% if (sc.local.default) %}true{% else %}false{% endif %}
+ helm upgrade --install nfs-subdir-external-provisioner /etc/kubernetes/addons/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz --namespace kube-system \
+ --set nfs.server={{ .sc.nfs.server }} --set nfs.path={{ .sc.nfs.path }} \
+ --set storageClass.defaultClass={{ if .sc.nfs.default }}true{{ else }}false{{ end }}
diff --git a/builtin/roles/addons/sc/templates/local-volume.yaml b/builtin/roles/addons/sc/templates/local-volume.yaml
index 53ed3adec..2b12dcb01 100644
--- a/builtin/roles/addons/sc/templates/local-volume.yaml
+++ b/builtin/roles/addons/sc/templates/local-volume.yaml
@@ -6,13 +6,13 @@ metadata:
name: local
annotations:
storageclass.kubesphere.io/supported-access-modes: '["ReadWriteOnce"]'
- storageclass.beta.kubernetes.io/is-default-class: {% if (sc.local.default) %}"true"{% else %}"false"{% endif %}
+ storageclass.beta.kubernetes.io/is-default-class: "{{ if .sc.local.default }}true{{ else }}false{{ end }}"
openebs.io/cas-type: local
cas.openebs.io/config: |
- name: StorageType
value: "hostpath"
- name: BasePath
- value: "{{ sc.local.path }}"
+ value: "{{ .sc.local.path }}"
provisioner: openebs.io/local
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
@@ -100,7 +100,7 @@ spec:
containers:
- name: openebs-provisioner-hostpath
imagePullPolicy: IfNotPresent
- image: {{ sc.local.provisioner_image }}
+ image: {{ .sc.local.provisioner_image }}
env:
# OPENEBS_IO_K8S_MASTER enables openebs provisioner to connect to K8s
# based on this address. This is ignored if empty.
@@ -131,7 +131,7 @@ spec:
- name: OPENEBS_IO_INSTALLER_TYPE
value: "openebs-operator-lite"
- name: OPENEBS_IO_HELPER_IMAGE
- value: "{{ sc.local.linux_utils_image }}"
+ value: "{{ .sc.local.linux_utils_image }}"
# LEADER_ELECTION_ENABLED is used to enable/disable leader election. By default
# leader election is enabled.
#- name: LEADER_ELECTION_ENABLED
diff --git a/builtin/roles/certs/renew-etcd/tasks/main.yaml b/builtin/roles/certs/renew-etcd/tasks/main.yaml
index bc830f4c7..e7467368c 100644
--- a/builtin/roles/certs/renew-etcd/tasks/main.yaml
+++ b/builtin/roles/certs/renew-etcd/tasks/main.yaml
@@ -2,20 +2,24 @@
- name: Sync ca file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/ssl/etcd/ssl/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/ssl/etcd/ssl/ca.crt
- name: Sync etcd cert file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.crt"
- dest: "/etc/ssl/etcd/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/ssl/etcd/ssl/server.crt
- name: Sync etcd key file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.key"
- dest: "/etc/ssl/etcd/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: |
+ /etc/ssl/etcd/ssl/server.key
- name: Restart etcd service
tags: ["certs"]
diff --git a/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml b/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
index 44c61e83e..4399caf8c 100644
--- a/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
+++ b/builtin/roles/certs/renew-kubernetes/tasks/etcd.yaml
@@ -2,15 +2,18 @@
- name: Sync etcd ca file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/kubernetes/pki/etcd/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/kubernetes/pki/etcd/ca.crt
- name: Sync etcd cert files to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.crt"
- dest: "/etc/kubernetes/pki/etcd/client.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/kubernetes/pki/etcd/client.crt
- name: Sync etcd key files to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.key"
- dest: "/etc/kubernetes/pki/etcd/client.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/kubernetes/pki/etcd/client.key
diff --git a/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml b/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
index b55e7cf1a..fa0e3b35c 100644
--- a/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
+++ b/builtin/roles/certs/renew-kubernetes/tasks/kube.yaml
@@ -9,41 +9,43 @@
tags: ["certs"]
run_once: true
command: |
- {% if (kubeadm_install_version.stdout|version:' 0
- - renew_etcd
+ - and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
+ - .renew_etcd
- name: Reload kubernetes pods
tags: [ "certs" ]
command: |
- {% if (cri.container_manager == "docker") %}
+ {{- if .cri.container_manager | eq "docker" -}}
docker ps -af name=k8s_PODS_kube-apiserver* -q | xargs --no-run-if-empty docker rm -f
docker ps -af name=k8s_PODS_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f
docker ps -af name=k8s_PODS_kube-scheduler* -q | xargs --no-run-if-empty docker rm -f
- {% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
+ {{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
docker ps -af name=k8s_PODS_etcd* -q | xargs --no-run-if-empty docker rm -f
- {% endif %}
- {% else %}
+ {{- end -}}
+ {{- else -}}
crictl pods --name kube-apiserver-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
crictl pods --name kube-controller-manager-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
crictl pods --name kube-scheduler-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
- {% if (kubernetes.etcd.deployment_type=='internal' && renew_etcd ) %}
+ {{- if and (.kubernetes.etcd.deployment_type | eq "internal") .renew_etcd -}}
crictl pods --name etcd-* -q | xargs -I% --no-run-if-empty bash -c 'crictl stopp % && crictl rmp %'
- {% endif %}
- {% endif %}
+ {{- end -}}
+ {{- end -}}
diff --git a/builtin/roles/certs/renew-registry/tasks/harbor.yaml b/builtin/roles/certs/renew-registry/tasks/harbor.yaml
index f446e0f00..74e4bb4d5 100644
--- a/builtin/roles/certs/renew-registry/tasks/harbor.yaml
+++ b/builtin/roles/certs/renew-registry/tasks/harbor.yaml
@@ -2,14 +2,18 @@
- name: Sync image registry cert file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.crt
- name: Sync image registry key file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.key
- name: Restart harbor service
tags: ["certs"]
diff --git a/builtin/roles/certs/renew-registry/tasks/main.yaml b/builtin/roles/certs/renew-registry/tasks/main.yaml
index 5437ffecb..eb7ef14ee 100644
--- a/builtin/roles/certs/renew-registry/tasks/main.yaml
+++ b/builtin/roles/certs/renew-registry/tasks/main.yaml
@@ -1,6 +1,6 @@
- include_tasks: harbor.yaml
tags: ["certs"]
- when: image_registry.type == 'harbor'
+ when: .image_registry.type | eq "harbor"
- include_tasks: registry.yaml
tags: ["certs"]
- when: image_registry.type == 'registry'
+ when: .image_registry.type | eq "registry"
diff --git a/builtin/roles/certs/renew-registry/tasks/registry.yaml b/builtin/roles/certs/renew-registry/tasks/registry.yaml
index 8333bffac..d7de79ba6 100644
--- a/builtin/roles/certs/renew-registry/tasks/registry.yaml
+++ b/builtin/roles/certs/renew-registry/tasks/registry.yaml
@@ -2,14 +2,18 @@
- name: Sync image registry cert file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.crt
- name: Sync image registry key file to remote
tags: ["certs"]
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.key
- name: Restart registry service
tags: ["certs"]
diff --git a/builtin/roles/init/init-artifact/defaults/main.yaml b/builtin/roles/init/init-artifact/defaults/main.yaml
index e46249771..705de3c8b 100644
--- a/builtin/roles/init/init-artifact/defaults/main.yaml
+++ b/builtin/roles/init/init-artifact/defaults/main.yaml
@@ -2,96 +2,224 @@ work_dir: /kubekey
artifact:
arch: [ "amd64" ]
# offline artifact package for kk.
-# artifact_file: /tmp/kubekey.tar.gz
+ artifact_file: ""
# the md5_file of artifact_file.
-# artifact_md5: /tmp/artifact.md5
+ artifact_md5: ""
# how to generate cert file.support: IfNotPresent, Always
gen_cert_policy: IfNotPresent
artifact_url:
etcd:
amd64: |
- {% if (kkzone == "cn") %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
+ {{- else -}}
+ https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-amd64.tar.gz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% else %}https://github.com/etcd-io/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-arm64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
+ {{- else -}}
+ https://github.com/etcd-io/etcd/releases/download/{{ .etcd_version }}/etcd-{{ .etcd_version }}-linux-arm64.tar.gz
+ {{- end -}}
kubeadm:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubeadm{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubeadm
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubeadm{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubeadm
+ {{- end -}}
kubelet:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubelet{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubelet
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubelet
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubelet{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubelet{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubelet
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubelet
+ {{- end -}}
kubectl:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/amd64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/kubectl{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/amd64/kubectl
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/amd64/kubectl
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/release/{{ kube_version }}/bin/linux/arm64/kubectl{% else %}https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/arm64/kubectl{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/release/{{ .kube_version }}/bin/linux/arm64/kubectl
+ {{- else -}}
+ https://storage.googleapis.com/kubernetes-release/release/{{ .kube_version }}/bin/linux/arm64/kubectl
+ {{- end -}}
cni:
amd64: |
- {% if (kkzone == 'cn') %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% else %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-amd64-{{ cni_version }}.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-amd64-{{ .cni_version }}.tgz
+ {{- else -}}
+ https://github.com/containernetworking/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-amd64-{{ .cni_version }}.tgz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% else %}https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ cni_version }}/cni-plugins-linux-arm64-{{ cni_version }}.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://containernetworking.pek3b.qingstor.com/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-arm64-{{ .cni_version }}.tgz
+ {{- else -}}
+ https://github.com/containernetworking/plugins/releases/download/{{ .cni_version }}/cni-plugins-linux-arm64-{{ .cni_version }}.tgz
+ {{- end -}}
helm:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-amd64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-amd64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-amd64.tar.gz
+ {{- else -}}
+ https://get.helm.sh/helm-{{ .helm_version }}-linux-amd64.tar.gz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-helm.pek3b.qingstor.com/helm-{{ helm_version }}-linux-arm64.tar.gz{% else %}https://get.helm.sh/helm-{{ helm_version }}-linux-arm64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-helm.pek3b.qingstor.com/helm-{{ .helm_version }}-linux-arm64.tar.gz
+ {{- else -}}
+ https://get.helm.sh/helm-{{ .helm_version }}-linux-arm64.tar.gz
+ {{- end -}}
crictl:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
+ {{- else -}}
+ https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-amd64.tar.gz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% else %}https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-arm64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
+ {{- else -}}
+ https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ .crictl_version }}/crictl-{{ .crictl_version }}-linux-arm64.tar.gz
+ {{- end -}}
docker:
amd64: |
- {% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/x86_64/docker-{{ docker_version }}.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://mirrors.aliyun.com/docker-ce/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
+ {{- else -}}
+ https://download.docker.com/linux/static/stable/x86_64/docker-{{ .docker_version }}.tgz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% else %}https://download.docker.com/linux/static/stable/aarch64/docker-{{ docker_version }}.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://mirrors.aliyun.com/docker-ce/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
+ {{- else -}}
+ https://download.docker.com/linux/static/stable/aarch64/docker-{{ .docker_version }}.tgz
+ {{- end -}}
cridockerd:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.amd64.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
+ {{- else -}}
+ https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.amd64.tgz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% else %}https://github.com/Mirantis/cri-dockerd/releases/download/{{ cridockerd_version }}/cri-dockerd-{{ cridockerd_version|slice:'1:' }}.arm64.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
+ {{- else -}}
+ https://github.com/Mirantis/cri-dockerd/releases/download/{{ .cridockerd_version }}/cri-dockerd-{{ .cridockerd_version | default "" | trimPrefix "v" }}.arm64.tgz
+ {{- end -}}
containerd:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-amd64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
+ {{- else -}}
+ https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-amd64.tar.gz
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% else %}https://github.com/containerd/containerd/releases/download/{{ containerd_version }}/containerd-{{ containerd_version|slice:'1:' }}-linux-arm64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
+ {{- else -}}
+ https://github.com/containerd/containerd/releases/download/{{ .containerd_version }}/containerd-{{ .containerd_version | default "" | trimPrefix "v" }}-linux-arm64.tar.gz
+ {{- end -}}
runc:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.amd64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
+ {{- else -}}
+ https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.amd64
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% else %}https://github.com/opencontainers/runc/releases/download/{{ runc_version }}/runc.arm64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
+ {{- else -}}
+ https://github.com/opencontainers/runc/releases/download/{{ .runc_version }}/runc.arm64
+ {{- end -}}
calicoctl:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-amd64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
+ {{- else -}}
+ https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-amd64
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% else %}https://github.com/projectcalico/calico/releases/download/{{ calico_version }}/calicoctl-linux-arm64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
+ {{- else -}}
+ https://github.com/projectcalico/calico/releases/download/{{ .calico_version }}/calicoctl-linux-arm64
+ {{- end -}}
dockercompose:
amd64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-x86_64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
+ {{- else -}}
+ https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-x86_64
+ {{- end -}}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% else %}https://github.com/docker/compose/releases/download/{{ dockercompose_version }}/docker-compose-linux-aarch64{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://kubernetes-release.pek3b.qingstor.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
+ {{- else -}}
+ https://github.com/docker/compose/releases/download/{{ .dockercompose_version }}/docker-compose-linux-aarch64
+ {{- end -}}
# registry:
# amd64: |
-# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-amd64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-amd64.tgz{% endif %}
+# {{- if .kkzone | eq "cn" -}}
+# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-amd64.tgz
+# {{- else -}}
+# https://github.com/kubesphere/kubekey/releases/download/{{ .registry_version }}/registry-{{ .registry_version }}-linux-amd64.tgz
+# {{- end -}}
# arm64: |
-# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-arm64.tgz{% else %}https://github.com/kubesphere/kubekey/releases/download/v2.0.0-alpha.1/registry-{{ registry_version }}-linux-arm64.tgz{% endif %}
+# {{- if .kkzone | eq "cn" -}}
+# https://kubernetes-release.pek3b.qingstor.com/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-arm64.tgz
+# {{- else -}}
+# https://github.com/kubesphere/kubekey/releases/download/{{ .registry_version }}/registry-{{ .registry_version }}-linux-arm64.tgz
+# {{- end -}}
harbor:
amd64: |
- {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz{% endif %}
+ {{- if .kkzone | eq "cn" -}}
+ https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
+ {{- else -}}
+ https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
+ {{- end -}}
# arm64: |
-# {% if (kkzone == 'cn') %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% else %}https://github.com/goharbor/harbor/releases/download/{{ harbor_version }}/harbor-{{ harbor_version }}-linux-arm64.tgz{% endif %}
+# {{- if .kkzone | eq "cn" -}}
+# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz
+# {{- else -}}
+# https://github.com/goharbor/harbor/releases/download/{{ .harbor_version }}/harbor-{{ .harbor_version }}-linux-arm64.tgz
+# {{- end -}}
# keepalived:
# amd64: |
-# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-amd64.tgz{% endif %}
+# {{- if .kkzone | eq "cn" -}}
+# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz
+# {{- else -}}
+# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-amd64.tgz
+# {{- end -}}
# arm64: |
-# {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% else %}https://github.com/osixia/keepalived/releases/download/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-arm64.tgz{% endif %}
- cilium: https://helm.cilium.io/cilium-{{ cilium_version }}.tgz
- kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ kubeovn_version }}.tgz
- hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ hybridnet_version }}/hybridnet-{{ hybridnet_version }}.tgz
- nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-4.0.18/nfs-subdir-external-provisioner-{{ nfs_provisioner_version }}.tgz
+# {{- if .kkzone | eq "cn" -}}
+# https://kubernetes-release.pek3b.qingstor.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz
+# {{- else -}}
+# https://github.com/osixia/keepalived/releases/download/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-arm64.tgz
+# {{- end -}}
+ cilium: https://helm.cilium.io/cilium-{{ .cilium_version }}.tgz
+ kubeovn: https://kubeovn.github.io/kube-ovn/kube-ovn-{{ .kubeovn_version }}.tgz
+ hybridnet: https://github.com/alibaba/hybridnet/releases/download/helm-chart-{{ .hybridnet_version }}/hybridnet-{{ .hybridnet_version }}.tgz
+ nfs_provisioner: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/releases/download/nfs-subdir-external-provisioner-4.0.18/nfs-subdir-external-provisioner-{{ .nfs_provisioner_version }}.tgz
images:
auth: []
list: []
diff --git a/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml b/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml
index 82cafc794..6f0fa7b75 100644
--- a/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml
+++ b/builtin/roles/init/init-artifact/tasks/download_by_curl.yaml
@@ -1,266 +1,252 @@
---
- name: Check binaries for etcd
command: |
- artifact_name={{ artifact.artifact_url.etcd[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.etcd .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/etcd/{{ .etcd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.etcd[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.etcd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.etcd[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.etcd .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - etcd_version | defined && etcd_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .etcd_version (ne .etcd_version "")
- name: Check binaries for kube
command: |
- kube_path={{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ item }}
+ kube_path={{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .item }}
if [ ! -f $kube_path/kubelet ]; then
mkdir -p $kube_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubelet[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubelet .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $kube_path/kubelet {{ artifact.artifact_url.kubelet[item] }}
+ curl -L -o $kube_path/kubelet {{ get .artifact.artifact_url.kubelet .item }}
fi
if [ ! -f $kube_path/kubeadm ]; then
mkdir -p $kube_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubeadm[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubeadm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $kube_path/kubeadm {{ artifact.artifact_url.kubeadm[item] }}
+ curl -L -o $kube_path/kubeadm {{ get .artifact.artifact_url.kubeadm .item }}
fi
if [ ! -f $kube_path/kubectl ]; then
mkdir -p $kube_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.kubectl[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.kubectl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $kube_path/kubectl {{ artifact.artifact_url.kubectl[item] }}
+ curl -L -o $kube_path/kubectl {{ get .artifact.artifact_url.kubectl .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - kube_version | defined && kube_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .kube_version (ne .kube_version "")
- name: Check binaries for cni
command: |
- artifact_name={{ artifact.artifact_url.cni[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cni/{{ cni_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.cni .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cni/{{ .cni_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cni[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cni .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cni[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cni .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - cni_version | defined && cni_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .cni_version (ne .cni_version "")
- name: Check binaries for helm
command: |
- artifact_name={{ artifact.artifact_url.helm[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.helm .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/helm/{{ .helm_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.helm[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.helm .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.helm[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.helm .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - helm_version | defined && helm_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .helm_version (ne .helm_version "")
- name: Check binaries for crictl
command: |
- artifact_name={{ artifact.artifact_url.crictl[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.crictl .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/crictl/{{ .crictl_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.crictl[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.crictl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.crictl[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.crictl .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - crictl_version | defined && crictl_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .crictl_version (ne .crictl_version "")
- name: Check binaries for docker
command: |
- artifact_name={{ artifact.artifact_url.docker[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.docker .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.docker[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.docker .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.docker[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.docker .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - docker_version | defined && docker_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .docker_version (ne .docker_version "")
- name: Check binaries for cridockerd
command: |
- artifact_name={{ artifact.artifact_url.cridockerd[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.cridockerd .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cri-dockerd/{{ .cridockerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.cridockerd[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.cridockerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.cridockerd[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.cridockerd .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - cridockerd_version | defined && cridockerd_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .cridockerd_version (ne .cridockerd_version "")
- name: Check binaries for containerd
command: |
- artifact_name={{ artifact.artifact_url.containerd[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/containerd/{{ containerd_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.containerd .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/containerd/{{ .containerd_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.containerd[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.containerd .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.containerd[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.containerd .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - containerd_version | defined && containerd_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .containerd_version (ne .containerd_version "")
- name: Check binaries for runc
command: |
- artifact_name={{ artifact.artifact_url.runc[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/runc/{{ runc_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.runc .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/runc/{{ .runc_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.runc[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.runc .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.runc[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.runc .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - runc_version | defined && runc_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .runc_version (ne .runc_version "")
- name: Check binaries for calicoctl
command: |
- artifact_name={{ artifact.artifact_url.calicoctl[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cni/{{ calico_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.calicoctl .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cni/{{ .calico_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.calicoctl[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.calicoctl .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.calicoctl[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.calicoctl .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - calico_version | defined && calico_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .calico_version (ne .calico_version "")
- name: Check binaries for registry
command: |
- artifact_name={{ artifact.artifact_url.registry[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.registry .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/image-registry/registry/{{ .registry_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.registry[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.registry .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.registry[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.registry .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - registry_version | defined && registry_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .registry_version (ne .registry_version "")
- name: Check binaries for docker-compose
command: |
compose_name=docker-compose
- compose_path={{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ item }}
+ compose_path={{ .work_dir }}/kubekey/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .item }}
if [ ! -f $compose_path/$compose_name ]; then
mkdir -p $compose_path
# download online
- curl -L -o $compose_path/$compose_name {{ artifact.artifact_url.dockercompose[item] }}
+ curl -L -o $compose_path/$compose_name {{ get .artifact.artifact_url.dockercompose .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - dockercompose_version | defined && dockercompose_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .dockercompose_version (ne .dockercompose_version "")
- name: Check binaries for harbor
command: |
- harbor_name={{ artifact.artifact_url.harbor[item]|split:"/"|last }}
- harbor_path={{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ item }}
+ harbor_name={{ get .artifact.artifact_url.harbor .item | splitList "/" | last }}
+ harbor_path={{ .work_dir }}/kubekey/image-registry/harbor/{{ .harbor_version }}/{{ .item }}
if [ ! -f $harbor_path/$harbor_name ]; then
mkdir -p $harbor_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.harbor[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.harbor .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $harbor_path/$harbor_name {{ artifact.artifact_url.harbor[item] }}
+ curl -L -o $harbor_path/$harbor_name {{ get .artifact.artifact_url.harbor .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - harbor_version | defined && harbor_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .harbor_version (ne .harbor_version "")
- name: Check binaries for keepalived
command: |
- artifact_name={{ artifact.artifact_url.keepalived[item]|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ item }}
+ artifact_name={{ get .artifact.artifact_url.keepalived .item | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/image-registry/keepalived/{{ .keepalived_version }}/{{ .item }}
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ artifact.artifact_url.keepalived[item] }})
+ http_code=$(curl -Lo /dev/null -s -w "%{http_code}" {{ get .artifact.artifact_url.keepalived .item }})
if [ $http_code != 200 ]; then
echo "http code is $http_code"
exit 1
fi
- curl -L -o $artifact_path/$artifact_name {{ artifact.artifact_url.keepalived[item] }}
+ curl -L -o $artifact_path/$artifact_name {{ get .artifact.artifact_url.keepalived .item }}
fi
- loop: "{{ artifact.arch }}"
- when:
- - keepalived_version | defined && keepalived_version != ""
+ loop: "{{ .artifact.arch | toJson }}"
+ when: and .keepalived_version (ne .keepalived_version "")
diff --git a/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml b/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml
index bc02b835d..0e462983a 100644
--- a/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml
+++ b/builtin/roles/init/init-artifact/tasks/download_by_helm.yaml
@@ -1,44 +1,44 @@
---
- name: Check binaries for cilium
command: |
- artifact_name={{ artifact.artifact_url.cilium|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cni
+ artifact_name={{ .artifact.artifact_url.cilium | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- cd $artifact_path && helm pull {{ artifact.artifact_url.cilium }}
+ cd $artifact_path && helm pull {{ .artifact.artifact_url.cilium }}
fi
- when: cilium_version | defined
+ when: and .cilium_version (ne .cilium_version "")
- name: Check binaries for kubeovn
command: |
- artifact_name={{ artifact.artifact_url.kubeovn|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cni
+ artifact_name={{ .artifact.artifact_url.kubeovn | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- cd $artifact_path && helm pull {{ artifact.artifact_url.kubeovn }}
+ cd $artifact_path && helm pull {{ .artifact.artifact_url.kubeovn }}
fi
- when: kubeovn_version | defined
+ when: and .kubeovn_version (ne .kubeovn_version "")
- name: Check binaries for hybridnet
command: |
- artifact_name={{ artifact.artifact_url.hybridnet|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/cni
+ artifact_name={{ .artifact.artifact_url.hybridnet | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/cni
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- cd $artifact_path && helm pull {{ artifact.artifact_url.hybridnet }}
+ cd $artifact_path && helm pull {{ .artifact.artifact_url.hybridnet }}
fi
- when: hybridnet_version | defined
+ when: and .hybridnet_version (ne .hybridnet_version "")
- name: Check binaries for nfs_provisioner
command: |
- artifact_name={{ artifact.artifact_url.nfs_provisioner|split:"/"|last }}
- artifact_path={{ work_dir }}/kubekey/sc
+ artifact_name={{ .artifact.artifact_url.nfs_provisioner | splitList "/" | last }}
+ artifact_path={{ .work_dir }}/kubekey/sc
if [ ! -f $artifact_path/$artifact_name ]; then
mkdir -p $artifact_path
# download online
- cd $artifact_path && helm pull {{ artifact.artifact_url.nfs_provisioner }}
+ cd $artifact_path && helm pull {{ .artifact.artifact_url.nfs_provisioner }}
fi
- when: nfs_provisioner_version| defined
+ when: and .nfs_provisioner_version (ne .nfs_provisioner_version "")
diff --git a/builtin/roles/init/init-artifact/tasks/main.yaml b/builtin/roles/init/init-artifact/tasks/main.yaml
index c91dfb2d6..f01a8a245 100644
--- a/builtin/roles/init/init-artifact/tasks/main.yaml
+++ b/builtin/roles/init/init-artifact/tasks/main.yaml
@@ -2,17 +2,17 @@
- name: Create work_dir
tags: ["always"]
command: |
- if [ ! -d "{{ work_dir }}" ]; then
- mkdir -p {{ work_dir }}
+ if [ ! -d "{{ .work_dir }}" ]; then
+ mkdir -p {{ .work_dir }}
fi
- name: Extract artifact to work_dir
tags: ["always"]
command: |
- if [ ! -f "{{ artifact_file }}" ]; then
- tar -zxvf {{ artifact_file }} -C {{ work_dir }}
+ if [ -f "{{ .artifact_file }}" ]; then
+ tar -zxvf {{ .artifact_file }} -C {{ .work_dir }}
fi
- when: artifact_file | defined
+ when: and .artifact_file (ne .artifact_file "")
- name: Download binaries
block:
@@ -27,4 +27,4 @@
- name: Chown work_dir to sudo
tags: ["always"]
command: |
- chown -R ${SUDO_UID}:${SUDO_GID} {{ work_dir }}
+ chown -R ${SUDO_UID}:${SUDO_GID} {{ .work_dir }}
diff --git a/builtin/roles/init/init-artifact/tasks/pki.yaml b/builtin/roles/init/init-artifact/tasks/pki.yaml
index d46058144..2b2cba4ae 100644
--- a/builtin/roles/init/init-artifact/tasks/pki.yaml
+++ b/builtin/roles/init/init-artifact/tasks/pki.yaml
@@ -3,32 +3,50 @@
gen_cert:
cn: root
date: 87600h
- policy: "{{ artifact.gen_cert_policy }}"
- out_key: "{{ work_dir }}/kubekey/pki/root.key"
- out_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+ policy: "{{ .artifact.gen_cert_policy }}"
+ out_key: |
+ {{ .work_dir }}/kubekey/pki/root.key
+ out_cert: |
+ {{ .work_dir }}/kubekey/pki/root.crt
- name: Generate etcd cert file
gen_cert:
- root_key: "{{ work_dir }}/kubekey/pki/root.key"
- root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+ root_key: |
+ {{ .work_dir }}/kubekey/pki/root.key
+ root_cert: |
+ {{ .work_dir }}/kubekey/pki/root.crt
cn: etcd
sans: |
- [{% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
+ {{- $ips := list -}}
+ {{- range .groups.etcd | default list -}}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
+ {{- end -}}
+ {{ $ips | toJson }}
date: 87600h
- policy: "{{ artifact.gen_cert_policy }}"
- out_key: "{{ work_dir }}/kubekey/pki/etcd.key"
- out_cert: "{{ work_dir }}/kubekey/pki/etcd.crt"
- when: groups['etcd']|length > 0
+ policy: "{{ .artifact.gen_cert_policy }}"
+ out_key: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ out_cert: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ when: .groups.etcd | default list | len | lt 0
- name: Generate registry image cert file
gen_cert:
- root_key: "{{ work_dir }}/kubekey/pki/root.key"
- root_cert: "{{ work_dir }}/kubekey/pki/root.crt"
+ root_key: |
+ {{ .work_dir }}/kubekey/pki/root.key
+ root_cert: |
+ {{ .work_dir }}/kubekey/pki/root.crt
cn: image_registry
sans: |
- [{% for h in groups['image_registry'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}]
+ {{- $ips := list -}}
+ {{- range .groups.image_registry | default list -}}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
+ {{- end -}}
+ {{ $ips | toJson }}
date: 87600h
- policy: "{{ artifact.gen_cert_policy }}"
- out_key: "{{ work_dir }}/kubekey/pki/image_registry.key"
- out_cert: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- when: groups['image_registry']|length > 0
+ policy: "{{ .artifact.gen_cert_policy }}"
+ out_key: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ out_cert: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ when: and .groups.image_registry (.groups.image_registry | default list | len | lt 0)
diff --git a/builtin/roles/init/init-os/tasks/init_ntpserver.yaml b/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
index 462b4e66e..ffe913b11 100644
--- a/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
+++ b/builtin/roles/init/init-os/tasks/init_ntpserver.yaml
@@ -1,10 +1,7 @@
---
- name: Configure ntp server
command: |
- chronyConfigFile="/etc/chrony.conf"
- if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
- chronyConfigFile="/etc/chrony/chrony.conf"
- fi
+ chronyConfigFile={{ if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}"/etc/chrony/chrony.conf"{{ else }}"/etc/chrony.conf"{{ end }}
# clear old server
sed -i '/^server/d' $chronyConfigFile
# disable pool
@@ -18,25 +15,25 @@
# add local
echo "local stratum 10" >> $chronyConfigFile
# add server
- {% for server in ntp_servers %}
- {% for _,v in inventory_hosts %}
- {% if (v.inventory_name == server) %}{% set server = v.internal_ipv4%}{% endif %}
- {% endfor %}
- grep -q '^server {{ server }} iburst' $chronyConfigFile||sed '1a server {{ server }} iburst' -i $chronyConfigFile
- {% endfor %}
+ {{- range $server := .ntp_servers -}}
+ {{- range $.inventory_hosts -}}
+ {{- if eq .inventory_name $server -}}
+ {{- $server = .internal_ipv4 -}}
+ {{- end -}}
+ {{- end -}}
+ grep -q '^server {{ $server }} iburst' $chronyConfigFile || sed '1a server {{ $server }} iburst' -i $chronyConfigFile
+ {{- end -}}
- name: Set timezone
command: |
- timedatectl set-timezone {{ timezone }}
+ timedatectl set-timezone {{ .timezone }}
timedatectl set-ntp true
- when: timezone | defined
+ when: .timezone | ne ""
- name: Restart ntp server
command: |
- chronyService="chronyd.service"
- if [ {{ os.release.ID }} = "ubuntu" ] || [ {{ os.release.ID_LIKE }} = "debian" ]; then
- chronyService="chrony.service"
- fi
- systemctl restart $chronyService
- when:
- - ntp_servers | defined or timezone | defined
+ {{- if or (.os.release.ID | eq "ubuntu") (.os.release.ID_LIKE | eq "debian") }}
+ systemctl restart chrony.service
+ {{- else }}
+ systemctl restart chronyd.service{{ end }}
+ when: or (.ntp_servers | len | lt 0) (.timezone | ne "")
diff --git a/builtin/roles/init/init-os/tasks/init_repository.yaml b/builtin/roles/init/init-os/tasks/init_repository.yaml
index 4c9aa79bf..af829977a 100644
--- a/builtin/roles/init/init-os/tasks/init_repository.yaml
+++ b/builtin/roles/init/init-os/tasks/init_repository.yaml
@@ -4,8 +4,9 @@
- name: Sync repository file
ignore_errors: true
copy:
- src: "{{ work_dir }}/kubekey/repository/{{ os.release.ID_LIKE }}-{{ os.release.VERSION_ID|safe }}-{{ binary_type.stdout }}.iso"
- dest: "/tmp/kubekey/repository.iso"
+ src: |
+ {{ .work_dir }}/kubekey/repository/{{ .os.release.ID_LIKE }}-{{ .os.release.VERSION_ID }}-{{ .binary_type.stdout }}.iso
+ dest: /tmp/kubekey/repository.iso
- name: Mount iso file
command: |
if [ -f "/tmp/kubekey/repository.iso" ]; then
@@ -42,7 +43,7 @@
else
apt-get update && apt install -y socat conntrack ipset ebtables chrony ipvsadm
fi
- when: os.release.ID_LIKE == "debian"
+ when: .os.release.ID_LIKE | eq "debian"
- name: Init rhel repository
command: |
now=$(date +"%Y-%m-%d %H:%M:%S")
@@ -74,4 +75,4 @@
# install
yum install -y openssl socat conntrack ipset ebtables chrony ipvsadm
fi
- when: os.release.ID_LIKE == "rhel fedora"
+ when: .os.release.ID_LIKE | eq "rhel fedora"
diff --git a/builtin/roles/init/init-os/tasks/main.yaml b/builtin/roles/init/init-os/tasks/main.yaml
index b76eab3d3..7a8637d2c 100644
--- a/builtin/roles/init/init-os/tasks/main.yaml
+++ b/builtin/roles/init/init-os/tasks/main.yaml
@@ -12,7 +12,8 @@
- name: Set hostname
command: |
- hostnamectl set-hostname {{ inventory_name }} && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ inventory_name }}/g' /etc/hosts
+ hostnamectl set-hostname {{ .inventory_name }} \
+ && sed -i '/^127.0.1.1/s/.*/127.0.1.1 {{ .inventory_name }}/g' /etc/hosts
- name: Sync init os to remote
template:
diff --git a/builtin/roles/init/init-os/templates/init-os.sh b/builtin/roles/init/init-os/templates/init-os.sh
index 09b543a2d..c530ba930 100644
--- a/builtin/roles/init/init-os/templates/init-os.sh
+++ b/builtin/roles/init/init-os/templates/init-os.sh
@@ -174,11 +174,21 @@ sed -i '/^$/N;/\n$/N;//D' /etc/hosts
cat >>/etc/hosts< 0
+ when: .groups.image_registry | default list | len | lt 0
block:
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt
- name: Sync image registry key file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.key"
-
-- name: Start containerd
- command: |
- systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
- when: containerd_install_version.stderr != ""
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.key
diff --git a/builtin/roles/install/cri/tasks/install_crictl.yaml b/builtin/roles/install/cri/tasks/install_crictl.yaml
index 111003dbb..ca0244ff9 100644
--- a/builtin/roles/install/cri/tasks/install_crictl.yaml
+++ b/builtin/roles/install/cri/tasks/install_crictl.yaml
@@ -4,18 +4,19 @@
command: crictl --version
register: crictl_install_version
-- name: Sync crictl binary to remote
- copy:
- src: "{{ work_dir }}/kubekey/crictl/{{ crictl_version }}/{{ binary_type.stdout }}/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- dest: "/tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- when: crictl_install_version.stderr != ""
-
-- name: Unpackage crictl binary
- command: |
- tar -xvf /tmp/kubekey/crictl-{{ crictl_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
- when: crictl_install_version.stderr != ""
-
-- name: Generate crictl config file
- template:
- src: crictl.config
- dest: /etc/crictl.yaml
+- name: Install crictl
+ when: or (.crictl_install_version.stderr | ne "") (.crictl_install_version.stdout | ne (printf "crictl version %s" .crictl_version))
+ block:
+ - name: Sync crictl binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/crictl/{{ .crictl_version }}/{{ .binary_type.stdout }}/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Unpackage crictl binary
+ command: |
+ tar -xvf /tmp/kubekey/crictl-{{ .crictl_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
+ - name: Generate crictl config file
+ template:
+ src: crictl.config
+ dest: /etc/crictl.yaml
diff --git a/builtin/roles/install/cri/tasks/install_cridockerd.yaml b/builtin/roles/install/cri/tasks/install_cridockerd.yaml
index 57e9671ca..d044b7590 100644
--- a/builtin/roles/install/cri/tasks/install_cridockerd.yaml
+++ b/builtin/roles/install/cri/tasks/install_cridockerd.yaml
@@ -4,30 +4,26 @@
command: cri-dockerd --version
register: cridockerd_install_version
-- name: Sync cri-dockerd Binary to remote
- copy:
- src: "{{ work_dir }}/kubekey/cri-dockerd/{{ cridockerd_version }}/{{ binary_type.stdout }}/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- dest: "/tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- when: cridockerd_install_version.stderr != ""
-
-- name: Generate cri-dockerd config file
- template:
- src: cri-dockerd.config
- dest: /etc/cri-dockerd.yaml
- when: cridockerd_install_version.stderr != ""
-
-- name: Unpackage cri-dockerd binary
- command: |
- tar -xvf /tmp/kubekey/cri-dockerd-{{ cridockerd_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin/
- when: cridockerd_install_version.stderr != ""
-
-- name: Generate cri-dockerd Service file
- template:
- src: cri-dockerd.service
- dest: /etc/systemd/system/cri-dockerd.service
- when: cridockerd_install_version.stderr != ""
-
-- name: Start cri-dockerd service
- command: |
- systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
- when: cridockerd_install_version.stderr != ""
+- name: Install cri-dockerd
+ when: or (.cridockerd_install_version.stderr | ne "") (.cridockerd_install_version.stdout | hasPrefix (printf "cri-dockerd %s " .cridockerd_version) | not)
+ block:
+ - name: Sync cri-dockerd Binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/cri-dockerd/{{ .cridockerd_version }}/{{ .binary_type.stdout }}/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Generate cri-dockerd config file
+ template:
+ src: cri-dockerd.config
+ dest: /etc/cri-dockerd.yaml
+ - name: Unpackage cri-dockerd binary
+ command: |
+ tar -xvf /tmp/kubekey/cri-dockerd-{{ .cridockerd_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin/
+ - name: Generate cri-dockerd Service file
+ template:
+ src: cri-dockerd.service
+ dest: /etc/systemd/system/cri-dockerd.service
+ - name: Start cri-dockerd service
+ command: |
+ systemctl daemon-reload && systemctl start cri-dockerd.service && systemctl enable cri-dockerd.service
diff --git a/builtin/roles/install/cri/tasks/install_docker.yaml b/builtin/roles/install/cri/tasks/install_docker.yaml
index c3e9489bd..205f3e8fb 100644
--- a/builtin/roles/install/cri/tasks/install_docker.yaml
+++ b/builtin/roles/install/cri/tasks/install_docker.yaml
@@ -4,53 +4,53 @@
command: docker --version
register: docker_install_version
-- name: Sync docker binary to remote
- copy:
- src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
- dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
- when: docker_install_version.stderr != ""
-
-- name: Unpackage docker binary
- command: |
- tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards docker/*
- when: docker_install_version.stderr != ""
-
-- name: Generate docker config file
- template:
- src: docker.config
- dest: /etc/docker/daemon.json
- when: docker_install_version.stderr != ""
-
-- name: Generate docker service file
- copy:
- src: docker.service
- dest: /etc/systemd/system/docker.service
- when: docker_install_version.stderr != ""
-
-- name: Generate containerd service file
- copy:
- src: containerd.service
- dest: /etc/systemd/system/containerd.service
- when: docker_install_version.stderr != ""
+- name: Install docker
+ when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
+ block:
+ - name: Sync docker binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
+ dest: |
+ /tmp/kubekey/docker-{{ .docker_version }}.tgz
+ - name: Unpackage docker binary
+ command: |
+ tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
+ - name: Generate docker config file
+ template:
+ src: docker.config
+ dest: /etc/docker/daemon.json
+ - name: Generate docker service file
+ copy:
+ src: docker.service
+ dest: /etc/systemd/system/docker.service
+ - name: Generate containerd service file
+ copy:
+ src: containerd.service
+ dest: /etc/systemd/system/containerd.service
+ - name: Start docker service
+ command: |
+ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+ systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
- name: Sync image registry tls to remote
- when: groups['image_registry'] > 0
+ when: .groups.image_registry | default list | len | lt 0
block:
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/ca.crt
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.crt
- name: Sync image registry key file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/etc/docker/certs.d/{{ image_registry.auth.registry }}/server.key"
-
-- name: Start docker service
- command: |
- systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
- systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
- when: docker_install_version.stderr != ""
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /etc/docker/certs.d/{{ .image_registry.auth.registry }}/server.key
diff --git a/builtin/roles/install/cri/tasks/main.yaml b/builtin/roles/install/cri/tasks/main.yaml
index f372fb66a..d3b957cb6 100644
--- a/builtin/roles/install/cri/tasks/main.yaml
+++ b/builtin/roles/install/cri/tasks/main.yaml
@@ -4,16 +4,16 @@
# install docker
- include_tasks: install_docker.yaml
- when: cri.container_manager == "docker"
+ when: .cri.container_manager | eq "docker"
# install containerd
- include_tasks: install_containerd.yaml
- when: cri.container_manager == "containerd"
+ when: .cri.container_manager | eq "containerd"
# install cridockerd
- include_tasks: install_cridockerd.yaml
when:
- - cri.container_manager == "docker"
- - kube_version|version:'>=v1.24.0'
+ - .cri.container_manager | eq "docker"
+ - .kube_version | semverCompare ">=v1.24.0"
diff --git a/builtin/roles/install/cri/templates/containerd.config b/builtin/roles/install/cri/templates/containerd.config
index fd874942a..5b83af1cf 100644
--- a/builtin/roles/install/cri/templates/containerd.config
+++ b/builtin/roles/install/cri/templates/containerd.config
@@ -1,6 +1,6 @@
version = 2
-root = {{ cri.containerd.data_root|default_if_none:"/var/lib/containerd" }}
+root = {{ .cri.containerd.data_root | default "/var/lib/containerd" }}
state = "/run/containerd"
[grpc]
@@ -36,11 +36,11 @@ state = "/run/containerd"
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
- sandbox_image = "{{ cri.sandbox_image }}"
+ sandbox_image = "{{ .cri.sandbox_image }}"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
- SystemdCgroup = {% if (cri.cgroup_driver=="systemd") %}true{% else %}false{% endif %}
+ SystemdCgroup = {{ if .cri.cgroup_driver | eq "systemd" }}true{{ else }}false{{ end }}
[plugins."io.containerd.grpc.v1.cri".cni]
bin_dir = "/opt/cni/bin"
conf_dir = "/etc/cni/net.d"
@@ -48,34 +48,37 @@ state = "/run/containerd"
conf_template = ""
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
- {% if (cri.registry.mirrors|length > 0) %}
+{{- if .cri.registry.mirrors | len | lt 0 }}
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
- endpoint = {{ cri.registry.mirrors|to_json|safe }}
- {% endif %}
- {% for ir in cri.registry.insecure_registries %}
- [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ ir }}"]
- endpoint = ["http://{{ ir }}"]
- {% endfor %}
-
- {% if (cri.registry.auths|length > 0 || groups['image_registry']|length>0) %}
+ endpoint = {{ .cri.registry.mirrors | toJson }}
+{{- end }}
+{{- range .cri.registry.insecure_registries }}
+ [plugins."io.containerd.grpc.v1.cri".registry.mirrors."{{ . }}"]
+ endpoint = ["http://{{ . }}"]
+{{- end }}
+{{- if or (.cri.registry.auths | len | lt 0) (.groups.image_registry | default list | len | lt 0) }}
[plugins."io.containerd.grpc.v1.cri".registry.configs]
- [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ image_registry.auth.registry }}".auth]
- username = "{{ image_registry.auth.username }}"
- password = "{{ image_registry.auth.password }}"
- [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ image_registry.auth.registry }}".tls]
- ca_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/ca.crt"
- cert_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.crt"
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".auth]
+ username = "{{ .image_registry.auth.username }}"
+ password = "{{ .image_registry.auth.password }}"
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .image_registry.auth.registry }}".tls]
+ ca_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/ca.crt"
+ cert_file = "/etc/containerd/certs.d/{{ .image_registry.auth.registry }}/server.crt"
key_file = "/etc/containerd/certs.d/{{ image_registry.auth.registry }}/server.key"
- {% for ir in cri.registry.auths %}
- [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".auth]
- username = "{{ ir.username }}"
- password = "{{ ir.password }}"
- {% if (ir.ca_file|defined) %}
- [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ ir.repo }}".tls]
- ca_file = "{{ ir.ca_file }}"
- cert_file = "{{ ir.crt_file }}"
- key_file = "{{ ir.key_file }}"
- insecure_skip_verify = {{ ir.skip_ssl }}
- {% endif %}
- {% endfor %}
- {% endif %}
+ {{- range .cri.registry.auths }}
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".auth]
+ username = "{{ .username }}"
+ password = "{{ .password }}"
+ [plugins."io.containerd.grpc.v1.cri".registry.configs."{{ .repo }}".tls]
+ {{- if .ca_file }}
+ ca_file = {{ .ca_file }}
+ {{- end }}
+ {{- if .crt_file }}
+ cert_file = {{ .crt_file }}
+ {{- end }}
+ {{- if .key_file }}
+ key_file = {{ .key_file }}
+ {{- end }}
+ insecure_skip_verify = {{ .skip_ssl | default true }}
+ {{- end }}
+{{- end }}
diff --git a/builtin/roles/install/cri/templates/crictl.config b/builtin/roles/install/cri/templates/crictl.config
index 29ba9c1de..9a8544e69 100644
--- a/builtin/roles/install/cri/templates/crictl.config
+++ b/builtin/roles/install/cri/templates/crictl.config
@@ -1,5 +1,5 @@
-runtime-endpoint: {{ cri.cri_socket }}
-image-endpoint: {{ cri.cri_socket }}
+runtime-endpoint: {{ .cri.cri_socket }}
+image-endpoint: {{ .cri.cri_socket }}
timeout: 5
debug: false
pull-image-on-create: false
diff --git a/builtin/roles/install/cri/templates/docker.config b/builtin/roles/install/cri/templates/docker.config
index d2bc7f80c..7eb1cc766 100644
--- a/builtin/roles/install/cri/templates/docker.config
+++ b/builtin/roles/install/cri/templates/docker.config
@@ -3,17 +3,17 @@
"max-size": "5m",
"max-file":"3"
},
- {% if (cri.docker.data_root|defined) %}
- "data-root": "{{ cri.docker.data_root }}",
- {% endif %}
- {% if (cri.registry.mirrors|defined) %}
- "registry-mirrors": {{ cri.registry.mirrors|to_json|safe }},
- {% endif %}
- {% if (cri.registry.insecure_registries|defined) %}
- "insecure-registries": {{ cri.registry.insecure_registries|to_json|safe }},
- {% endif %}
- {% if (cri.docker.bridge_ip|defined) %}
- "bip": "{{ cri.docker.bridge_ip }}",
- {% endif %}
- "exec-opts": ["native.cgroupdriver={{ cri.cgroup_driver }}"]
+{{- if .cri.docker.data_root }}
+ "data-root": "{{ .cri.docker.data_root }}",
+{{- end }}
+{{- if .cri.registry.mirrors }}
+ "registry-mirrors": {{ .cri.registry.mirrors | toJson }},
+{{- end }}
+ {{- if .cri.registry.insecure_registries }}
+ "insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
+{{- end }}
+ {{- if .cri.docker.bridge_ip }}
+ "bip": "{{ .cri.docker.bridge_ip }}",
+{{- end }}
+ "exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver }}"]
}
diff --git a/builtin/roles/install/etcd/defaults/main.yaml b/builtin/roles/install/etcd/defaults/main.yaml
index 703aa9362..6f017bcc1 100644
--- a/builtin/roles/install/etcd/defaults/main.yaml
+++ b/builtin/roles/install/etcd/defaults/main.yaml
@@ -21,7 +21,7 @@ etcd:
backup:
backup_dir: /var/lib/etcd-backup
keep_backup_number: 5
-# etcd_backup_script: /usr/local/bin/kube-scripts/backup-etcd.sh
+ etcd_backup_script: "backup.sh"
on_calendar: "*-*-* *:00/30:00"
performance: false
traffic_priority: false
diff --git a/builtin/roles/install/etcd/tasks/backup_etcd.yaml b/builtin/roles/install/etcd/tasks/backup_etcd.yaml
index ddbc4e69b..772071b37 100644
--- a/builtin/roles/install/etcd/tasks/backup_etcd.yaml
+++ b/builtin/roles/install/etcd/tasks/backup_etcd.yaml
@@ -1,28 +1,20 @@
---
-- name: Generate default backup etcd script
- template:
- src: "backup.sh"
- dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
- mode: 777
- when:
- - ! etcd.backup.etcd_backup_script|defined
-
- name: Sync custom backup etcd script
template:
- src: "{{ etcd.backup.etcd_backup_script }}"
- dest: "/usr/local/bin/kube-scripts/backup-etcd.sh"
+ src: |
+ {{ .etcd.backup.etcd_backup_script }}
+ dest: /usr/local/bin/kube-scripts/backup-etcd.sh
mode: 777
- when: etcd.backup.etcd_backup_script|defined
- name: Generate backup etcd service
copy:
- src: "backup.service"
- dest: "/etc/systemd/system/backup-etcd.service"
+ src: backup.service
+ dest: /etc/systemd/system/backup-etcd.service
- name: Generate backup etcd timer
template:
- src: "backup.timer"
- dest: "/etc/systemd/system/backup-etcd.timer"
+ src: backup.timer
+ dest: /etc/systemd/system/backup-etcd.timer
- name: Enable etcd timer
command: |
diff --git a/builtin/roles/install/etcd/tasks/install_etcd.yaml b/builtin/roles/install/etcd/tasks/install_etcd.yaml
index a8072fb0a..b8e5be242 100644
--- a/builtin/roles/install/etcd/tasks/install_etcd.yaml
+++ b/builtin/roles/install/etcd/tasks/install_etcd.yaml
@@ -1,44 +1,49 @@
---
- name: Sync etcd binary to node
copy:
- src: "{{ work_dir }}/kubekey/etcd/{{ etcd_version }}/{{ binary_type.stdout }}/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- dest: "/tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz"
+ src: |
+ {{ .work_dir }}/kubekey/etcd/{{ .etcd_version }}/{{ .binary_type.stdout }}/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz
- name: Extract etcd binary
command: |
- tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}.tar.gz \
- --wildcards etcd-{{ etcd_version }}-linux-{{ binary_type.stdout }}/etcd*
+ tar --strip-components=1 -C /usr/local/bin/ -xvf /tmp/kubekey/etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}.tar.gz \
+ --wildcards etcd-{{ .etcd_version }}-linux-{{ .binary_type.stdout }}/etcd*
- name: Sync ca file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/ssl/etcd/ssl/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/ssl/etcd/ssl/ca.crt
- name: Sync etcd cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.crt"
- dest: "/etc/ssl/etcd/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/ssl/etcd/ssl/server.crt
- name: Sync etcd key file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.key"
- dest: "/etc/ssl/etcd/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/ssl/etcd/ssl/server.key
- name: Generate etcd env file
template:
- src: "etcd.env"
- dest: "/etc/etcd.env"
+ src: etcd.env
+ dest: /etc/etcd.env
- name: Generate etcd systemd service file
copy:
- src: "etcd.service"
- dest: "/etc/systemd/system/etcd.service"
+ src: etcd.service
+ dest: /etc/systemd/system/etcd.service
# refer: https://etcd.io/docs/v3.5/tuning/
- name: Set cpu to performance
command: |
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
- when: etcd.performance
+ when: .etcd.performance
- name: Set Traffic Priority
command: |
@@ -47,7 +52,7 @@
tc filter add dev eth0 parent 1: protocol ip prio 1 u32 match ip dport 2380 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip sport 2379 0xffff flowid 1:1
tc filter add dev eth0 parent 1: protocol ip prio 2 u32 match ip dport 2379 0xffff flowid 1:1
- when: etcd.traffic_priority
+ when: .etcd.traffic_priority
- name: Start etcd service
command: systemctl daemon-reload && systemctl start etcd && systemctl enable etcd
diff --git a/builtin/roles/install/etcd/tasks/main.yaml b/builtin/roles/install/etcd/tasks/main.yaml
index b33fa286b..2c3531d72 100644
--- a/builtin/roles/install/etcd/tasks/main.yaml
+++ b/builtin/roles/install/etcd/tasks/main.yaml
@@ -5,22 +5,23 @@
run_once: true
register: etcd_install_version
-- name: Init etcd
- when: etcd_install_version.stderr != ""
+- name: Install etcd
+ when: |
+ or (.etcd_install_version.stderr | ne "") (.etcd_install_version.stdout | hasPrefix (printf "etcd Version: %s\n" (.etcd_version | default "" | trimPrefix "v")) | not)
block:
- - name: Add etcd user
- command: |
- useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
- - name: Create etcd directories
- command: |
- if [ ! -d "{{ item }}" ]; then
- mkdir -p {{ item }} && chown -R etcd {{ item }}
- fi
- loop:
- - "/var/lib/etcd"
+ - name: Init etcd
+ block:
+ - name: Add etcd user
+ command: |
+ useradd -M -c 'Etcd user' -s /sbin/nologin -r etcd || :
+ - name: Create etcd directories
+ command: |
+ if [ ! -d "{{ .item }}" ]; then
+ mkdir -p {{ .item }} && chown -R etcd {{ .item }}
+ fi
+ loop:
+ - "/var/lib/etcd"
-- include_tasks: install_etcd.yaml
- when: etcd_install_version.stderr != ""
+ - include_tasks: install_etcd.yaml
-- include_tasks: backup_etcd.yaml
- when: etcd_install_version.stderr != ""
+ - include_tasks: backup_etcd.yaml
diff --git a/builtin/roles/install/etcd/templates/backup.script b/builtin/roles/install/etcd/templates/backup.sh
similarity index 78%
rename from builtin/roles/install/etcd/templates/backup.script
rename to builtin/roles/install/etcd/templates/backup.sh
index e91652278..61e70235a 100644
--- a/builtin/roles/install/etcd/templates/backup.script
+++ b/builtin/roles/install/etcd/templates/backup.sh
@@ -5,10 +5,10 @@ set -o nounset
set -o pipefail
ETCDCTL_PATH='/usr/local/bin/etcdctl'
-ENDPOINTS='https://{{ internal_ipv4 }}:2379'
-ETCD_DATA_DIR="{{ etcd.env.data_dir }}"
-BACKUP_DIR="{{ etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
-KEEPBACKUPNUMBER='{{ etcd.backup.keep_backup_number }}'
+ENDPOINTS='https://{{ .internal_ipv4 }}:2379'
+ETCD_DATA_DIR="{{ .etcd.env.data_dir }}"
+BACKUP_DIR="{{ .etcd.backup.backup_dir }}/etcd-$(date +%Y-%m-%d-%H-%M-%S)"
+KEEPBACKUPNUMBER='{{ .etcd.backup.keep_backup_number }}'
((KEEPBACKNUMBER++))
ETCDCTL_CERT="/etc/ssl/etcd/ssl/server.crt"
diff --git a/builtin/roles/install/etcd/templates/backup.timer b/builtin/roles/install/etcd/templates/backup.timer
index 6141397d0..4b73c6bc2 100644
--- a/builtin/roles/install/etcd/templates/backup.timer
+++ b/builtin/roles/install/etcd/templates/backup.timer
@@ -1,7 +1,7 @@
[Unit]
Description=Timer to backup ETCD
[Timer]
-OnCalendar={{ etcd.backup.on_calendar }}
+OnCalendar={{ .etcd.backup.on_calendar }}
Unit=backup-etcd.service
[Install]
WantedBy=multi-user.target
diff --git a/builtin/roles/install/etcd/templates/etcd.env b/builtin/roles/install/etcd/templates/etcd.env
index d79658280..e578a6c25 100644
--- a/builtin/roles/install/etcd/templates/etcd.env
+++ b/builtin/roles/install/etcd/templates/etcd.env
@@ -1,39 +1,43 @@
-ETCD_DATA_DIR={{ etcd.env.data_dir }}
-ETCD_ADVERTISE_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }}
-ETCD_INITIAL_ADVERTISE_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
-ETCD_INITIAL_CLUSTER_STATE={{ etcd.state }}
-ETCD_LISTEN_CLIENT_URLS={{ internal_ipv4|stringformat:"https://%s:2379" }},https://127.0.0.1:2379
-ETCD_INITIAL_CLUSTER_TOKEN={{ etcd.env.token }}
-ETCD_LISTEN_PEER_URLS={{ internal_ipv4|stringformat:"https://%s:2380" }}
-ETCD_NAME={{ inventory_name }}
+ETCD_DATA_DIR={{ .etcd.env.data_dir }}
+ETCD_ADVERTISE_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }}
+ETCD_INITIAL_ADVERTISE_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
+ETCD_INITIAL_CLUSTER_STATE={{ .etcd.state }}
+ETCD_LISTEN_CLIENT_URLS={{ printf "https://%s:2379" .internal_ipv4 }},https://127.0.0.1:2379
+ETCD_INITIAL_CLUSTER_TOKEN={{ .etcd.env.token }}
+ETCD_LISTEN_PEER_URLS={{ printf "https://%s:2380" .internal_ipv4 }}
+ETCD_NAME={{ .inventory_name }}
ETCD_PROXY=off
ETCD_ENABLE_V2=true
-ETCD_INITIAL_CLUSTER={% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}{{ hv.inventory_name }}={{ hv.internal_ipv4|stringformat:"https://%s:2380" }}{% if (not forloop.Last) %},{% endif %}{% endfor %}
-ETCD_ELECTION_TIMEOUT={{ etcd.env.election_timeout }}
-ETCD_HEARTBEAT_INTERVAL={{ etcd.env.heartbeat_interval }}
-ETCD_AUTO_COMPACTION_RETENTION={{ etcd.env.compaction_retention }}
-ETCD_SNAPSHOT_COUNT={{ etcd.env.snapshot_count }}
-{% if (etcd.metrics|defined) %}
-ETCD_METRICS={{ etcd.env.metrics }}
-{% endif %}
-{% if (etcd.env.quota_backend_bytes|defined) %}
-ETCD_QUOTA_BACKEND_BYTES={{ etcd.env.quota_backend_bytes }}
-{% endif %}
-{% if (etcd.env.max_request_bytes|defined) %}
-ETCD_MAX_REQUEST_BYTES={{ etcd.env.max_request_bytes }}
-{% endif %}
-{% if (etcd.env.max_snapshots|defined) %}
-ETCD_MAX_SNAPSHOTS={{ etcd.env.max_snapshots }}
-{% endif %}
-{% if (etcd.env.max_wals|defined) %}
-ETCD_MAX_WALS={{ etcd.env.max_wals }}
-{% endif %}
-{% if (etcd.env.log_level|defined) %}
-ETCD_LOG_LEVEL={{ etcd.env.log_level }}
-{% endif %}
-{% if (etcd.env.unsupported_arch|defined) %}
-ETCD_UNSUPPORTED_ARCH={{ etcd.env.unsupported_arch }}
-{% endif %}
+{{ $ips := list }}
+{{- range .groups.etcd | default list -}}
+ {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.inventory_hosts . "inventory_name") (index $.inventory_hosts . "internal_ipv4")) -}}
+{{- end -}}
+ETCD_INITIAL_CLUSTER={{ $ips | join "," }}
+ETCD_ELECTION_TIMEOUT={{ .etcd.env.election_timeout }}
+ETCD_HEARTBEAT_INTERVAL={{ .etcd.env.heartbeat_interval }}
+ETCD_AUTO_COMPACTION_RETENTION={{ .etcd.env.compaction_retention }}
+ETCD_SNAPSHOT_COUNT={{ .etcd.env.snapshot_count }}
+{{- if .etcd.metrics }}
+ETCD_METRICS={{ .etcd.env.metrics }}
+{{- end }}
+{{- if .etcd.env.quota_backend_bytes }}
+ETCD_QUOTA_BACKEND_BYTES={{ .etcd.env.quota_backend_bytes }}
+{{- end }}
+{{- if .etcd.env.max_request_bytes }}
+ETCD_MAX_REQUEST_BYTES={{ .etcd.env.max_request_bytes }}
+{{- end }}
+{{- if .etcd.env.max_snapshots }}
+ETCD_MAX_SNAPSHOTS={{ .etcd.env.max_snapshots }}
+{{- end }}
+{{- if .etcd.env.max_wals }}
+ETCD_MAX_WALS={{ .etcd.env.max_wals }}
+{{- end }}
+{{- if .etcd.env.log_level }}
+ETCD_LOG_LEVEL={{ .etcd.env.log_level }}
+{{- end }}
+{{- if .etcd.env.unsupported_arch }}
+ETCD_UNSUPPORTED_ARCH={{ .etcd.env.unsupported_arch }}
+{{- end }}
# TLS settings
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.crt
diff --git a/builtin/roles/install/image-registry/defaults/main.yaml b/builtin/roles/install/image-registry/defaults/main.yaml
index 75d0d3d49..51750ee2f 100644
--- a/builtin/roles/install/image-registry/defaults/main.yaml
+++ b/builtin/roles/install/image-registry/defaults/main.yaml
@@ -2,7 +2,12 @@ image_registry:
# ha_vip: 192.168.122.59
namespace_override: ""
auth:
- registry: "{% if (image_registry.ha_vip|defined) %}{{ image_registry.ha_vip }}{% else %}{{ groups['image_registry']|first }}{% endif %}"
+ registry: |
+ {{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") -}}
+ {{ .image_registry.ha_vip }}
+ {{- else -}}
+ {{ .groups.image_registry | default list | first }}
+ {{- end -}}
username: admin
password: Harbor12345
# registry type. support: harbor, registry
diff --git a/builtin/roles/install/image-registry/tasks/install_docker.yaml b/builtin/roles/install/image-registry/tasks/install_docker.yaml
index 4ec278404..0d27a5a8c 100644
--- a/builtin/roles/install/image-registry/tasks/install_docker.yaml
+++ b/builtin/roles/install/image-registry/tasks/install_docker.yaml
@@ -4,37 +4,31 @@
command: docker --version
register: docker_install_version
-- name: Sync docker binary to remote
- copy:
- src: "{{ work_dir }}/kubekey/docker/{{ docker_version }}/{{ binary_type.stdout }}/docker-{{ docker_version }}.tgz"
- dest: "/tmp/kubekey/docker-{{ docker_version }}.tgz"
- when: docker_install_version.stderr != ""
-
-- name: Generate docker config file
- template:
- src: "docker.config"
- dest: "/etc/docker/daemon.json"
- when: docker_install_version.stderr != ""
-
-- name: Unpackage docker binary
- command: |
- tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ docker_version }}.tgz --wildcards docker/*
- when: docker_install_version.stderr != ""
-
-- name: Generate docker service file
- copy:
- src: "docker.service"
- dest: "/etc/systemd/system/docker.service"
- when: docker_install_version.stderr != ""
-
-- name: Generate containerd service file
- copy:
- src: "containerd.service"
- dest: "/etc/systemd/system/containerd.service"
- when: docker_install_version.stderr != ""
-
-- name: Start docker service
- command: |
- systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
- systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
- when: docker_install_version.stderr != ""
+- name: Install docker
+ when: or (.docker_install_version.stderr | ne "") (.docker_install_version.stdout | hasPrefix (printf "Docker version %s," .docker_version) | not)
+ block:
+ - name: Sync docker binary to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/docker/{{ .docker_version }}/{{ .binary_type.stdout }}/docker-{{ .docker_version }}.tgz
+ dest: |
+ /tmp/kubekey/docker-{{ .docker_version }}.tgz
+ - name: Generate docker config file
+ template:
+ src: docker.config
+ dest: /etc/docker/daemon.json
+ - name: Unpackage docker binary
+ command: |
+ tar -C /usr/local/bin/ --strip-components=1 -xvf /tmp/kubekey/docker-{{ .docker_version }}.tgz --wildcards docker/*
+ - name: Generate docker service file
+ copy:
+ src: docker.service
+ dest: /etc/systemd/system/docker.service
+ - name: Generate containerd service file
+ copy:
+ src: containerd.service
+ dest: /etc/systemd/system/containerd.service
+ - name: Start docker service
+ command: |
+ systemctl daemon-reload && systemctl start containerd.service && systemctl enable containerd.service
+ systemctl daemon-reload && systemctl start docker.service && systemctl enable docker.service
diff --git a/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml b/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
index 9e6dcee37..de8bdc8ed 100644
--- a/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
+++ b/builtin/roles/install/image-registry/tasks/install_docker_compose.yaml
@@ -5,9 +5,9 @@
register: dockercompose_install_version
- name: Sync docker-compose to remote
+ when: or (.dockercompose_install_version.stderr | ne "") (.dockercompose_install_version.stdout | ne (printf "Docker Compose version %s" .dockercompose_version))
copy:
- src: "{{ work_dir }}/kubekey/image-registry/docker-compose/{{ dockercompose_version }}/{{ binary_type.stdout }}/docker-compose"
- dest: "/usr/local/bin/docker-compose"
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/docker-compose/{{ .dockercompose_version }}/{{ .binary_type.stdout }}/docker-compose
+ dest: /usr/local/bin/docker-compose
mode: 0755
- when:
- - dockercompose_install_version.stderr != ""
diff --git a/builtin/roles/install/image-registry/tasks/install_harbor.yaml b/builtin/roles/install/image-registry/tasks/install_harbor.yaml
index 66fb44dc6..5018d4d73 100644
--- a/builtin/roles/install/image-registry/tasks/install_harbor.yaml
+++ b/builtin/roles/install/image-registry/tasks/install_harbor.yaml
@@ -1,44 +1,52 @@
---
- name: Sync harbor package to remote
copy:
- src: "{{ work_dir }}/kubekey/image-registry/harbor/{{ harbor_version }}/{{ binary_type.stdout }}/harbor-offline-installer-{{ harbor_version }}.tgz"
- dest: "/opt/harbor/{{ harbor_version }}/harbor-offline-installer-{{ harbor_version }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/harbor/{{ .harbor_version }}/{{ .binary_type.stdout }}/harbor-offline-installer-{{ .harbor_version }}.tgz
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor-offline-installer-{{ .harbor_version }}.tgz
- name: Untar harbor package
command: |
- cd /opt/harbor/{{ harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ harbor_version }}.tgz
+ cd /opt/harbor/{{ .harbor_version }}/ && tar -zxvf harbor-offline-installer-{{ .harbor_version }}.tgz
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/opt/harbor/{{ harbor_version }}/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.crt
- name: Sync image registry key file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/opt/harbor/{{ harbor_version }}/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/ssl/server.key
- name: Generate harbor config
template:
- src: "harbor.config"
- dest: "/opt/harbor/{{ harbor_version }}/harbor/harbor.yml"
+ src: harbor.config
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor/harbor.yml
- name: Generate keepalived docker compose
template:
- src: "harbor_keepalived.docker-compose"
- dest: "/opt/harbor/{{ harbor_version }}/harbor/docker-compose-keepalived.yml"
+ src: harbor_keepalived.docker-compose
+ dest: |
+ /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml
when:
- - image_registry.ha_vip | defined
- - image_registry_service.stderr != ""
+ - and .image_registry.ha_vip (ne .image_registry.ha_vip "")
+    - .harbor_service_status.stderr | ne ""
- name: Install harbor
command: |
- cd /opt/harbor/{{ harbor_version }}/harbor && /bin/bash install.sh
+ cd /opt/harbor/{{ .harbor_version }}/harbor && /bin/bash install.sh
- name: Register harbor service
template:
- src: "harbor.service"
- dest: "/etc/systemd/system/harbor.service"
+ src: harbor.service
+ dest: /etc/systemd/system/harbor.service
- name: Start harbor service
command: systemctl daemon-reload && systemctl start harbor.service && systemctl enable harbor.service
diff --git a/builtin/roles/install/image-registry/tasks/install_keepalived.yaml b/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
index 7f1efefaa..11bf206bb 100644
--- a/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
+++ b/builtin/roles/install/image-registry/tasks/install_keepalived.yaml
@@ -1,19 +1,23 @@
---
- name: Sync keepalived image to remote
copy:
- src: "{{ work_dir }}/kubekey/image-registry/keepalived/{{ keepalived_version }}/{{ binary_type.stdout }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
- dest: "/opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/keepalived/{{ .keepalived_version }}/{{ .binary_type.stdout }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
+ dest: |
+ /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Load keeplived image
command: |
- docker load -i /opt/keepalived/{{ keepalived_version }}/keepalived-{{ keepalived_version }}-linux-{{ binary_type.stdout }}.tgz
+ docker load -i /opt/keepalived/{{ .keepalived_version }}/keepalived-{{ .keepalived_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Sync keeplived config to remote
template:
- src: "keeplived.config"
- dest: "/opt/keeplived/{{ keepalived_version }}/keepalived.conf"
+ src: keeplived.config
+ dest: |
+ /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
- name: Sync healthcheck shell to remote
template:
- src: "keepalived.healthcheck"
- dest: "/opt/keeplived/{{ keepalived_version }}/healthcheck.sh"
+ src: keepalived.healthcheck
+ dest: |
+ /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
diff --git a/builtin/roles/install/image-registry/tasks/install_registry.yaml b/builtin/roles/install/image-registry/tasks/install_registry.yaml
index 539607758..c980e8fca 100644
--- a/builtin/roles/install/image-registry/tasks/install_registry.yaml
+++ b/builtin/roles/install/image-registry/tasks/install_registry.yaml
@@ -1,52 +1,58 @@
---
- name: Sync registry image to remote
copy:
- src: "{{ work_dir }}/kubekey/image-registry/registry/{{ registry_version }}/{{ binary_type.stdout }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
- dest: "/opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz"
+ src: |
+ {{ .work_dir }}/kubekey/image-registry/registry/{{ .registry_version }}/{{ .binary_type.stdout }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
+ dest: |
+ /opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Mount NFS dir
command: |
- if [ {{ os.release.ID_LIKE }} == 'debian' ]; then
+      {{- if .os.release.ID_LIKE | eq "rhel fedora" -}}
       yum update && yum install -y nfs-utils
-      elif [ {{ os.release.ID_LIKE }} == 'rhel fedora' ]
+      {{- else if .os.release.ID_LIKE | eq "debian" -}}
       apt update && apt install -y nfs-common
- fi
- nfsHostName={{ groups['nfs']|first }}
- {% set hv=inventory_hosts['$nfsHostName'] %}
- mount -t nfs {{ hv.internal_ipv4 }}:{{ image_registry.registry.storage.filesystem.nfs_mount }} {{ image_registryregistry.storage.filesystem.rootdirectory }}
+ {{- end -}}
+      mount -t nfs {{ index .inventory_hosts (.groups.nfs | default list | first) "internal_ipv4" }}:{{ .image_registry.registry.storage.filesystem.nfs_mount }} {{ .image_registry.registry.storage.filesystem.rootdirectory }}
when:
- - image_registry.registry.storage.filesystem.nfs_mount | defined
- - groups['nfs']|length == 1
- - image_registry_service.stderr != ""
+ - and .image_registry.registry.storage.filesystem.nfs_mount (ne .image_registry.registry.storage.filesystem.nfs_mount "")
+ - .groups.nfs | default list | len | eq 1
+    - .registry_service_status.stderr | ne ""
- name: Load registry image
command: |
- docker load -i /opt/registry/{{ registry_version }}/registry-{{ registry_version }}-linux-{{ binary_type.stdout }}.tgz
+ docker load -i /opt/registry/{{ .registry_version }}/registry-{{ .registry_version }}-linux-{{ .binary_type.stdout }}.tgz
- name: Sync image registry cert file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.crt"
- dest: "/opt/registry/{{ registry_version }}/ssl/server.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.crt
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.crt
- name: Sync image registry key file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/image_registry.key"
- dest: "/opt/registry/{{ registry_version }}/ssl/server.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/image_registry.key
+ dest: |
+ /opt/registry/{{ .registry_version }}/ssl/server.key
- name: Generate registry docker compose
template:
- src: "registry.docker-compose"
- dest: "/opt/registry/{{ registry_version }}/docker-compose.yml"
+ src: registry.docker-compose
+ dest: |
+ /opt/registry/{{ .registry_version }}/docker-compose.yml
- name: Generate registry config
template:
- src: "registry.config"
- dest: "/opt/registry/{{ registry_version }}/config.yml"
+ src: registry.config
+ dest: |
+ /opt/registry/{{ .registry_version }}/config.yml
- name: Register registry service
copy:
- src: "registry.service"
- dest: "/etc/systemd/system/registry.service"
+ src: registry.service
+ dest: /etc/systemd/system/registry.service
- name: Start registry service
command: systemctl daemon-reload && systemctl start registry.service && systemctl enable registry.service
diff --git a/builtin/roles/install/image-registry/tasks/load_images.yaml b/builtin/roles/install/image-registry/tasks/load_images.yaml
index 1e4c4033e..77d94c349 100644
--- a/builtin/roles/install/image-registry/tasks/load_images.yaml
+++ b/builtin/roles/install/image-registry/tasks/load_images.yaml
@@ -2,51 +2,53 @@
- name: Create harbor project for each image
tags: ["only_image"]
command: |
+ {{- if .image_registry.namespace_override | eq "" -}}
for dir in /tmp/kubekey/images/*; do
if [ ! -d "$dir" ]; then
- # only deal directory
+ # only deal with directories
continue
fi
- IFS='=' read -ra array <<< "${dir##*/}"
- if [ $(echo ${my_array[@]} | wc -w) > 3 ]; then
- project=${array[1]}
- dest_image=$(echo "${array[@]:2:-1}" | tr ' ' '/')
- tag=${array[-1]}
+ dir_name=${dir##*/}
+      old_ifs=$IFS; IFS='='; set -- $dir_name; IFS=$old_ifs
+      image_array="$*"
+ array_length=$#
+
+ if [ "$array_length" -gt 3 ]; then
+ project=$2
+        dest_image=$(shift 2 && echo "$*" | tr ' ' '/'); dest_image=${dest_image%/*}
+ tag=$(echo "$@" | awk '{print $NF}')
else
- echo "unsupported image"
+ echo "unsupported image: $dir_name"
exit 1
fi
# if project is not exist, create if
- http_code=$(curl -Iks -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name=${project}' | grep HTTP | awk '{print $2}')
+      http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" "https://localhost/api/v2.0/projects?project_name=${project}" | grep HTTP | awk '{print $2}')
if [ $http_code == 404 ]; then
# create project
- curl -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
+ curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
fi
done
- when:
- - image_registry.type == 'harbor'
- - image_registry.namespace_override == ""
- -
-- name: Create harbor project for namespace_override
- tags: ["only_image"]
- command: |
+ {{- else -}}
+    # if the project does not exist, create it
- http_code=$(curl -Iks -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name={{ image_registry.namespace_override }}' | grep HTTP | awk '{print $2}')
+ http_code=$(curl -Iks -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" 'https://localhost/api/v2.0/projects?project_name={{ .image_registry.namespace_override }}' | grep HTTP | awk '{print $2}')
if [ $http_code == 404 ]; then
# create project
- curl -u "{{ image_registry.auth.username }}:{{ image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"{{ image_registry.namespace_override }}\", \"public\": true}"
+ curl -u "{{ .image_registry.auth.username }}:{{ .image_registry.auth.password }}" -k -X POST -H "Content-Type: application/json" "https://localhost/api/v2.0/projects" -d "{ \"project_name\": \"{{ .image_registry.namespace_override }}\", \"public\": true}"
fi
- when:
- - image_registry.type == 'harbor'
- - image_registry.namespace_override != ""
+ {{- end -}}
+ when: .image_registry.type | eq "harbor"
- name: Sync images package to harbor
tags: ["only_image"]
image:
push:
- registry: "{{ image_registry.auth.registry }}"
- namespace_override: "{{ image_registry.namespace_override }}"
- username: "{{ image_registry.auth.username }}"
- password: "{{ image_registry.auth.password }}"
+ registry: |
+ {{ .image_registry.auth.registry }}
+ namespace_override: |
+ {{ .image_registry.namespace_override }}
+ username: |
+ {{ .image_registry.auth.username }}
+ password: |
+ {{ .image_registry.auth.password }}
diff --git a/builtin/roles/install/image-registry/tasks/main.yaml b/builtin/roles/install/image-registry/tasks/main.yaml
index 0de498c3c..310d799a3 100644
--- a/builtin/roles/install/image-registry/tasks/main.yaml
+++ b/builtin/roles/install/image-registry/tasks/main.yaml
@@ -4,27 +4,27 @@
- include_tasks: install_docker_compose.yaml
- include_tasks: install_keepalived.yaml
- when: image_registry.ha_vip | defined
+ when: and .image_registry.ha_vip (ne .image_registry.ha_vip "")
- name: Install harbor
- when: image_registry.type == 'harbor'
+ when: .image_registry.type | eq "harbor"
block:
- name: Check if harbor installed
ignore_errors: true
command: systemctl status harbor.service
- register: image_registry_service
- - include_tasks: install_registry.yaml
- when: image_registry_service.stderr != ""
+ register: harbor_service_status
+ - include_tasks: install_harbor.yaml
+ when: .harbor_service_status.stderr | ne ""
- name: Install registry
- when: image_registry.type == 'registry'
+ when: .image_registry.type | eq "registry"
block:
- name: Check if registry installed
ignore_errors: true
command: systemctl status registry.service
- register: image_registry_service
+ register: registry_service_status
- include_tasks: install_registry.yaml
- when: image_registry_service.stderr != ""
+ when: .registry_service_status.stderr | ne ""
- include_tasks: load_images.yaml
tags: ["only_image"]
diff --git a/builtin/roles/install/image-registry/templates/docker.config b/builtin/roles/install/image-registry/templates/docker.config
index 8ae73211a..23767c217 100644
--- a/builtin/roles/install/image-registry/templates/docker.config
+++ b/builtin/roles/install/image-registry/templates/docker.config
@@ -3,17 +3,17 @@
"max-size": "5m",
"max-file":"3"
},
- {% if (cri.docker.data_root|defined) %}
- "data-root": {{ cri.docker.data_root }},
- {% endif %}
- {% if (registry.mirrors|defined) %}
- "registry-mirrors": {{ registry.mirrors|to_json|safe }},
- {% endif %}
- {% if (registry.insecure_registries|defined) %}
- "insecure-registries": {{ registry.insecure_registries|to_json|safe }},
- {% endif %}
- {% if (cri.docker.bridge_ip|defined) %}
- "bip": "{{ cri.docker.bridge_ip }}",
- {% endif %}
- "exec-opts": ["native.cgroupdriver=systemd"]
+{{- if and .cri.docker.data_root (ne .cri.docker.data_root "") }}
+ "data-root": "{{ .cri.docker.data_root }}",
+{{- end }}
+{{- if and .cri.registry.mirrors (ne .cri.registry.mirrors "") }}
+ "registry-mirrors": {{ .cri.registry.mirrors | toJson }},
+{{- end }}
+ {{- if and .cri.registry.insecure_registries (ne .cri.registry.insecure_registries "") }}
+ "insecure-registries": {{ .cri.registry.insecure_registries | toJson }},
+{{- end }}
+ {{- if and .cri.docker.bridge_ip (ne .cri.docker.bridge_ip "") }}
+ "bip": "{{ .cri.docker.bridge_ip }}",
+{{- end }}
+ "exec-opts": ["native.cgroupdriver={{ .cri.cgroup_driver | default "systemd" }}"]
}
diff --git a/builtin/roles/install/image-registry/templates/harbor.config b/builtin/roles/install/image-registry/templates/harbor.config
index 83d0b660f..7feac11ce 100644
--- a/builtin/roles/install/image-registry/templates/harbor.config
+++ b/builtin/roles/install/image-registry/templates/harbor.config
@@ -2,7 +2,7 @@
# The IP address or hostname to access admin UI and registry service.
# DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
-hostname: {{ internal_ipv4 }}
+hostname: {{ .internal_ipv4 }}
# http related config
http:
@@ -14,8 +14,8 @@ https:
# https port for harbor, default is 443
port: 443
# The path of cert and key files for nginx
- certificate: /opt/harbor/{{ harbor_version }}/ssl/server.crt
- private_key: /opt/harbor/{{ harbor_version }}/ssl/server.key
+ certificate: /opt/harbor/{{ .harbor_version }}/ssl/server.crt
+ private_key: /opt/harbor/{{ .harbor_version }}/ssl/server.key
# enable strong ssl ciphers (default: false)
# strong_ssl_ciphers: false
@@ -34,7 +34,7 @@ https:
# The initial password of Harbor admin
# It only works in first time to install harbor
# Remember Change the admin password from UI after launching Harbor.
-harbor_admin_password: {{ image_registry.auth.password }}
+harbor_admin_password: {{ .image_registry.auth.password }}
# Harbor DB configuration
database:
@@ -224,7 +224,7 @@ _version: 2.10.0
# Global proxy
# Config http proxy for components, e.g. http://my.proxy.com:3128
-# Components doesn't need to connect to each others via http proxy.
+# Components doesn't need to connect to each others via http proxy.
# Remove component from `components` array if want disable proxy
# for it. If you want use proxy for replication, MUST enable proxy
# for core and jobservice, and set `http_proxy` and `https_proxy`.
diff --git a/builtin/roles/install/image-registry/templates/harbor.service b/builtin/roles/install/image-registry/templates/harbor.service
index 3f32b16f0..9219b1e66 100644
--- a/builtin/roles/install/image-registry/templates/harbor.service
+++ b/builtin/roles/install/image-registry/templates/harbor.service
@@ -5,7 +5,7 @@ Requires=docker.service
[Service]
Type=simple
-ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/harbor/docker-compose.yml up {% if (image_registry.ha_vip | defined) %}&& /usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ harbor_version }}/docker-compose-keepalived.yml up{% endif %}
+ExecStart=/usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose.yml up{{ if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }} && /usr/local/bin/docker-compose -p harbor -f /opt/harbor/{{ .harbor_version }}/harbor/docker-compose-keepalived.yml up{{ end }}
ExecStop=/usr/local/bin/docker-compose -p harbor down
Restart=on-failure
[Install]
diff --git a/builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose b/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
similarity index 68%
rename from builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose
rename to builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
index 4356e8557..49e86c23c 100644
--- a/builtin/roles/install/image-registry/templates/harbor_keepalive.docker-compose
+++ b/builtin/roles/install/image-registry/templates/harbor_keepalived.docker-compose
@@ -2,7 +2,7 @@
version: '2.3'
services:
keepalived:
- image: osixia/keepalived: {{ keepalived_version }}
+    image: osixia/keepalived:{{ .keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
@@ -17,10 +17,10 @@ services:
- proxy
volumes:
- type: bind
- source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
+ source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
target: /container/service/keepalived/assets/keepalived.conf
- type: bind
- source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
+ source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
target: /etc/keepalived/healthcheck.sh
networks:
- harbor
diff --git a/builtin/roles/install/image-registry/templates/keepalived.config b/builtin/roles/install/image-registry/templates/keepalived.config
index ad7309ed8..36c11c2f0 100644
--- a/builtin/roles/install/image-registry/templates/keepalived.config
+++ b/builtin/roles/install/image-registry/templates/keepalived.config
@@ -23,7 +23,7 @@ vrrp_script healthcheck {
auth_pass k8s-test
}
virtual_ipaddress {
- {{ image_registry.ha_vip }}
+ {{ .image_registry.ha_vip }}
}
track_script {
healthcheck
diff --git a/builtin/roles/install/image-registry/templates/keepalived.healthcheck b/builtin/roles/install/image-registry/templates/keepalived.healthcheck
index cfbf520d5..c517da8eb 100644
--- a/builtin/roles/install/image-registry/templates/keepalived.healthcheck
+++ b/builtin/roles/install/image-registry/templates/keepalived.healthcheck
@@ -1,12 +1,12 @@
#!/bin/bash
-{% if (image_registry.type=='registry') %}
+{{- if .image_registry.type | eq "registry" }}
# registry service
service=registry:5000
-{% else %}
+{{- else }}
# harbor service
service=harbor:80
-{% endif %}
+{{- end }}
nc -zv -w 2 $service > /dev/null 2>&1
diff --git a/builtin/roles/install/image-registry/templates/registry.config b/builtin/roles/install/image-registry/templates/registry.config
index 1f7c30487..c008f1028 100644
--- a/builtin/roles/install/image-registry/templates/registry.config
+++ b/builtin/roles/install/image-registry/templates/registry.config
@@ -22,55 +22,55 @@ log:
# to:
# - errors@example.com
storage:
-{% if (image_registryregistry.storage.filesystem|length != 0) %}
+{{- if and .image_registry.registry.storage.filesystem.rootdirectory (ne .image_registry.registry.storage.filesystem.rootdirectory "") }}
filesystem:
- rootdirectory: {{ image_registryregistry.storage.filesystem.rootdirectory }}
+ rootdirectory: {{ .image_registry.registry.storage.filesystem.rootdirectory }}
maxthreads: 100
-{% endif %}
-{% if (image_registryregistry.storage.azure|length != 0) %}
+{{- end }}
+{{- if .image_registry.registry.storage.azure }}
azure:
- accountname: {{ image_registryregistry.storage.azure.accountname }}
- accountkey: {{ image_registryregistry.storage.azure.accountkey }}
- container: {{ image_registryregistry.storage.azure.container }}
-{% endif %}
-{% if (image_registryregistry.storage.gcs|length != 0) %}
+ accountname: {{ .image_registry.registry.storage.azure.accountname }}
+ accountkey: {{ .image_registry.registry.storage.azure.accountkey }}
+ container: {{ .image_registry.registry.storage.azure.container }}
+{{- end }}
+{{- if .image_registry.registry.storage.gcs }}
gcs:
- bucket: {{ image_registryregistry.storage.gcs.bucket }}
- keyfile: {{ image_registryregistry.storage.gcs.keyfile }}
+ bucket: {{ .image_registry.registry.storage.gcs.bucket }}
+ keyfile: {{ .image_registry.registry.storage.gcs.keyfile }}
credentials:
type: service_account
- project_id: {{ image_registryregistry.storage.gcs.credentials.project_id }}
- private_key_id: {{ image_registryregistry.storage.gcs.credentials.private_key_id }}
- private_key: {{ image_registryregistry.storage.gcs.credentials.private_key }}
- client_email: {{ image_registryregistry.storage.gcs.credentials.client_email }}
- client_id: {{ image_registryregistry.storage.gcs.credentials.client_id }}
- auth_uri: {{ image_registryregistry.storage.gcs.credentials.auth_uri }}
- token_uri: {{ image_registryregistry.storage.gcs.credentials.token_uri }}
- auth_provider_x509_cert_url: {{ image_registryregistry.storage.gcs.credentials.auth_provider_x509_cert_url }}
- client_x509_cert_url: {{ image_registryregistry.storage.gcs.credentials.client_x509_cert_url }}
- rootdirectory: {{ image_registryregistry.storage.gcs.rootdirectory }}
-{% endif %}
-{% if (image_registryregistry.storage.s3|length != 0) %}
+ project_id: {{ .image_registry.registry.storage.gcs.credentials.project_id }}
+ private_key_id: {{ .image_registry.registry.storage.gcs.credentials.private_key_id }}
+ private_key: {{ .image_registry.registry.storage.gcs.credentials.private_key }}
+ client_email: {{ .image_registry.registry.storage.gcs.credentials.client_email }}
+ client_id: {{ .image_registry.registry.storage.gcs.credentials.client_id }}
+ auth_uri: {{ .image_registry.registry.storage.gcs.credentials.auth_uri }}
+ token_uri: {{ .image_registry.registry.storage.gcs.credentials.token_uri }}
+ auth_provider_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.auth_provider_x509_cert_url }}
+ client_x509_cert_url: {{ .image_registry.registry.storage.gcs.credentials.client_x509_cert_url }}
+ rootdirectory: {{ .image_registry.registry.storage.gcs.rootdirectory }}
+{{- end }}
+{{- if .image_registry.registry.storage.s3 }}
s3:
- accesskey: {{ image_registryregistry.storage.s3.accesskey }}
- secretkey: {{ image_registryregistry.storage.s3.secretkey }}
- region: {{ image_registryregistry.storage.s3.region }}
- regionendpoint: {{ image_registryregistry.storage.s3.regionendpoint }}
+ accesskey: {{ .image_registry.registry.storage.s3.accesskey }}
+ secretkey: {{ .image_registry.registry.storage.s3.secretkey }}
+ region: {{ .image_registry.registry.storage.s3.region }}
+ regionendpoint: {{ .image_registry.registry.storage.s3.regionendpoint }}
forcepathstyle: true
accelerate: false
- bucket: {{ image_registryregistry.storage.s3.bucket }}
+ bucket: {{ .image_registry.registry.storage.s3.bucket }}
encrypt: true
- keyid: {{ image_registryregistry.storage.s3.keyid }}
+ keyid: {{ .image_registry.registry.storage.s3.keyid }}
secure: true
v4auth: true
chunksize: 5242880
multipartcopychunksize: 33554432
multipartcopymaxconcurrency: 100
multipartcopythresholdsize: 33554432
- rootdirectory: {{ image_registryregistry.storage.s3.rootdirectory }}
+ rootdirectory: {{ .image_registry.registry.storage.s3.rootdirectory }}
usedualstack: false
loglevel: debug
-{% endif %}
+{{- end }}
inmemory: # This driver takes no parameters
delete:
enabled: false
diff --git a/builtin/roles/install/image-registry/templates/registry.docker-compose b/builtin/roles/install/image-registry/templates/registry.docker-compose
index 5fa35cdb5..573603764 100644
--- a/builtin/roles/install/image-registry/templates/registry.docker-compose
+++ b/builtin/roles/install/image-registry/templates/registry.docker-compose
@@ -2,7 +2,7 @@
version: '2.3'
services:
registry:
- image: registry:{{ registry_version }}
+ image: registry:{{ .registry_version }}
container_name: registry
restart: always
dns_search: .
@@ -15,18 +15,18 @@ services:
- SETUID
volumes:
- type: bind
- source: /opt/registry/{{ registry_version }}/ssl/
+ source: /opt/registry/{{ .registry_version }}/ssl/
target: /etc/registry/ssl/
- type: bind
- source: /opt/registry/{{ registry_version }}/config.yml
+ source: /opt/registry/{{ .registry_version }}/config.yml
target: /etc/docker/registry/config.yml
port:
- 443:5000
networks:
- registry
-{% if (image_registry.ha_vip | defined) %}
+{{- if and .image_registry.ha_vip (ne .image_registry.ha_vip "") }}
keepalived:
- image: osixia/keepalived: {{ keepalived_version }}
+ image: osixia/keepalived:{{ .keepalived_version }}
container_name: keepalived
restart: always
dns_search: .
@@ -41,14 +41,14 @@ services:
- registry
volumes:
- type: bind
- source: /opt/keeplived/{{ keepalived_version }}/keepalived.conf
+ source: /opt/keeplived/{{ .keepalived_version }}/keepalived.conf
target: /container/service/keepalived/assets/keepalived.conf
- type: bind
- source: /opt/keeplived/{{ keepalived_version }}/healthcheck.sh
+ source: /opt/keeplived/{{ .keepalived_version }}/healthcheck.sh
target: /etc/keepalived/healthcheck.sh
networks:
- registry
-{% endif %}
+{{- end }}
networks:
registry:
external: false
diff --git a/builtin/roles/install/image-registry/templates/registry.service b/builtin/roles/install/image-registry/templates/registry.service
index e8f13ba08..f6e7f56cf 100644
--- a/builtin/roles/install/image-registry/templates/registry.service
+++ b/builtin/roles/install/image-registry/templates/registry.service
@@ -5,7 +5,7 @@ Requires=docker.service
[Service]
Type=simple
-ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ registry_version }}/docker-compose.yml up
+ExecStart=/usr/local/bin/docker-compose -p registry -f /opt/registry/{{ .registry_version }}/docker-compose.yml up
ExecStop=/usr/local/bin/docker-compose -p registry down
Restart=on-failure
[Install]
diff --git a/builtin/roles/install/kubernetes/defaults/main.yaml b/builtin/roles/install/kubernetes/defaults/main.yaml
index 0157fff05..45e2b375a 100644
--- a/builtin/roles/install/kubernetes/defaults/main.yaml
+++ b/builtin/roles/install/kubernetes/defaults/main.yaml
@@ -3,8 +3,8 @@ kubernetes:
# support: flannel, calico
kube_network_plugin: calico
# the image repository of kubernetes.
- image_repository: "{{ k8s_registry }}"
-
+ image_repository: |
+ {{ .k8s_registry }}
# memory size for each kube_worker node.(unit kB)
# should be greater than or equal to minimal_node_memory_mb.
minimal_node_memory_mb: 10
@@ -17,9 +17,12 @@ kubernetes:
# the first value is ipv4_cidr, the last value is ipv6_cidr.
pod_cidr: 10.233.64.0/18
service_cidr: 10.233.0.0/18
- dns_image: "{{ k8s_registry }}/coredns/coredns:v1.8.6"
- dns_cache_image: "{{ dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20"
- dns_service_ip: "{{ kubernetes.networking.service_cidr|ip_range:2 }}"
+ dns_image: |
+ {{ .k8s_registry }}/coredns/coredns:v1.8.6
+ dns_cache_image: |
+ {{ .dockerio_registry }}/kubesphere/k8s-dns-node-cache:1.22.20
+ dns_service_ip: |
+ {{ .kubernetes.networking.service_cidr | ipInCIDR 2 }}
# Specify a stable IP address or DNS name for the control plane.
# control_plane_endpoint: lb.kubesphere.local
apiserver:
@@ -49,10 +52,10 @@ kubernetes:
kubelet:
max_pod: 110
pod_pids_limit: 10000
- feature_gates: {}
+# feature_gates:
container_log_max_size: 5Mi
container_log_max_files: 3
- extra_args: {}
+# extra_args:
coredns:
dns_etc_hosts: []
# the config for zones
@@ -72,7 +75,7 @@ kubernetes:
cache: 30
kubernetes:
zones:
- - "{{ kubernetes.networking.dns_domain }}"
+ - "{{ .kubernetes.networking.dns_domain }}"
# rewrite performs internal message rewriting.
# rewrite:
# # specify multiple rules and an incoming query matches multiple rules.
@@ -142,17 +145,22 @@ kubernetes:
max_concurrent: 1000
kube_vip:
enabled: false
+ address: |
+ {{ .kubernetes.control_plane_endpoint }}
# support:BGP, ARP
mode: BGP
- image: "{{ dockerio_registry }}/plndr/kube-vip:v0.7.2"
+ image: |
+ {{ .dockerio_registry }}/plndr/kube-vip:v0.7.2
haproxy:
enabled: false
health_port: 8081
- image: "{{ dockerio_registry }}/library/haproxy:2.9.6-alpine"
- etcd: # todo should apply zone variable
+ image: |
+ {{ .dockerio_registry }}/library/haproxy:2.9.6-alpine
+ etcd:
# It is possible to deploy etcd with three methods.
# external: Deploy etcd cluster with external etcd cluster.
# internal: Deploy etcd cluster by static pod.
deployment_type: external
- image: "{{ k8s_registry }}/etcd:3.5.0"
+ image: |
+ {{ .k8s_registry }}/etcd:3.5.0
custom_label: {}
diff --git a/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml b/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
index 8db71eb20..8f33ec433 100644
--- a/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
+++ b/builtin/roles/install/kubernetes/tasks/deploy_kube_vip.yaml
@@ -2,30 +2,27 @@
# install with static pod: https://kube-vip.io/docs/installation/static/
- name: Get interface for ipv4
command: |
- ip route | grep ' {{ internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e \"s/^.*dev.//\" -e \"s/.proto.*//\"| uniq
+ ip route | grep ' {{ .internal_ipv4 }} ' | grep 'proto kernel scope link src' | sed -e \"s/^.*dev.//\" -e \"s/.proto.*//\"| uniq
register: interface
-- name: Should ipv4 interface not be empty
- assert: interface.stdout != ""
- fail_msg: "{{ internal_ipv4 }} cannot be found in network interface."
-
- name: Generate kubevip manifest
template:
- src: "kubevip/kubevip.{{ kubernetes.kube_vip.mode }}"
- dest: "/etc/kubernetes/manifests/kubevip.yaml"
+ src: |
+ kubevip/kubevip.{{ .kubernetes.kube_vip.mode }}
+ dest: /etc/kubernetes/manifests/kubevip.yaml
- name: Update kubelet config
command: |
- sed -i 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
+ sed -i 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' /etc/kubernetes/kubelet.conf
systemctl restart kubelet
- name: Update kube-proxy config
command: |
set -o pipefail && /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf get configmap kube-proxy -n kube-system -o yaml \
- | sed 's#server:.*#server: https://127.0.0.1:{{ kubernetes.apiserver.port }}#g' \
+ | sed 's#server:.*#server: https://127.0.0.1:{{ .kubernetes.apiserver.port }}#g' \
| /usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf replace -f -
/usr/local/bin/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0
- name: Update hosts file
command: |
- sed -i 's#.* {{ kubernetes.control_plane_endpoint }}#127.0.0.1 {{ kubernetes.control_plane_endpoint }}s#g' /etc/hosts
+    sed -i 's#.* {{ .kubernetes.control_plane_endpoint }}#127.0.0.1 {{ .kubernetes.control_plane_endpoint }}#g' /etc/hosts
diff --git a/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml b/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
index 36c7fafb4..d70724a3f 100644
--- a/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
+++ b/builtin/roles/install/kubernetes/tasks/init_kubernetes.yaml
@@ -5,8 +5,8 @@
- name: Create kube directories
command: |
- if [ ! -d "{{ item.path }}" ]; then
- mkdir -p {{ item.path }} && chown kube -R {{ item.chown }}
+ if [ ! -d "{{ .item.path }}" ]; then
+ mkdir -p {{ .item.path }} && chown kube -R {{ .item.chown }}
fi
loop:
- {path: "/usr/local/bin", chown: "/usr/local/bin"}
@@ -20,52 +20,49 @@
- {path: "/var/lib/calico", chown: "/var/lib/calico"}
- name: Sync external etcd config
- when:
- - kubernetes.etcd.deployment_type == 'external' && groups['etcd']|length > 0
+ when: and (.kubernetes.etcd.deployment_type | eq "external") (.groups.etcd | default list | len | lt 0)
block:
- name: Sync etcd ca file to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/root.crt"
- dest: "/etc/kubernetes/pki/etcd/ca.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/root.crt
+ dest: /etc/kubernetes/pki/etcd/ca.crt
- name: Sync etcd cert files to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.crt"
- dest: "/etc/kubernetes/pki/etcd/client.crt"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.crt
+ dest: /etc/kubernetes/pki/etcd/client.crt
- name: Sync etcd key files to remote
copy:
- src: "{{ work_dir }}/kubekey/pki/etcd.key"
- dest: "/etc/kubernetes/pki/etcd/client.key"
+ src: |
+ {{ .work_dir }}/kubekey/pki/etcd.key
+ dest: /etc/kubernetes/pki/etcd/client.key
- name: Sync audit policy file to remote
copy:
- src: "audit"
- dest: "/etc/kubernetes/audit/"
- when:
- - kubernetes.audit
+ src: audit
+ dest: /etc/kubernetes/audit/
+ when: .kubernetes.audit
- name: Generate kubeadm init config
template:
- src: "kubeadm/{% if (kube_version|version:'>=v1.24.0') %}kubeadm-init.v1beta3{% else %}kubeadm-init.v1beta2{% endif %}"
- dest: "/etc/kubernetes/kubeadm-config.yaml"
+ src: |
+ {{- if .kube_version | semverCompare ">=v1.24.0" -}}
+ kubeadm/kubeadm-init.v1beta3
+ {{- else -}}
+ kubeadm/kubeadm-init.v1beta2
+ {{- end -}}
+ dest: /etc/kubernetes/kubeadm-config.yaml
- name: Init kubernetes cluster
block:
- name: Init kubernetes by kubeadm
command: |
- /usr/local/bin/kubeadm init \
- --config=/etc/kubernetes/kubeadm-config.yaml \
- --ignore-preflight-errors=FileExisting-crictl,ImagePull \
- {% if (not kubernetes.kube_proxy.enabled) %}--skip-phases=addon/kube-proxy{% endif %}
+ /usr/local/bin/kubeadm init --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull {{ if not .kubernetes.kube_proxy.enabled }}--skip-phases=addon/kube-proxy{{ end }}
rescue:
- name: Reset kubeadm if init failed
- command: kubeadm reset -f {% if (cri.cri_socket !="") %}--cri-socket {{ cri.cri_socket }}{% endif %}
-
-- name: Remote master taint
- ignore_errors: true
- command: |
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- when: inventory_name in groups["kube_worker"]
+ command: |
+ kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
- name: Copy kubeconfig to default dir
command: |
@@ -73,16 +70,16 @@
mkdir -p /root/.kube
fi
cp -f /etc/kubernetes/admin.conf /root/.kube/config
- when: kube_node_info_important.stderr != ""
+ when: .kube_node_info_important.stderr | ne ""
- name: Set to worker node
- when: inventory_name in groups["kube_worker"]
+ when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
- /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
+ /usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=
diff --git a/builtin/roles/install/kubernetes/tasks/install_binaries.yaml b/builtin/roles/install/kubernetes/tasks/install_binaries.yaml
new file mode 100644
index 000000000..32f86a068
--- /dev/null
+++ b/builtin/roles/install/kubernetes/tasks/install_binaries.yaml
@@ -0,0 +1,65 @@
+---
+- name: Check if helm is installed
+ ignore_errors: true
+ command: helm version
+ register: helm_install_version
+- name: Install helm
+ when: or (.helm_install_version.stderr | ne "") (.helm_install_version.stdout | contains (printf "Version:\"%s\"" .helm_version) | not)
+ block:
+ - name: Sync helm to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/helm/{{ .helm_version }}/{{ .binary_type.stdout }}/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ dest: |
+ /tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz
+ - name: Install helm
+ command: |
+ tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ .helm_version }}-linux-{{ .binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ .binary_type.stdout }}/helm
+
+- name: Check if kubeadm is installed
+ ignore_errors: true
+ command: kubeadm version -o short
+ register: kubeadm_install_version
+- name: Install kubeadm
+ when: or (.kubeadm_install_version.stderr | ne "") (.kubeadm_install_version.stdout | ne .kube_version)
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubeadm
+ dest: /usr/local/bin/kubeadm
+ mode: 0755
+
+- name: Check if kubectl is installed
+ ignore_errors: true
+ command: kubectl version
+ register: kubectl_install_version
+- name: Sync kubectl to remote
+ when: or (.kubectl_install_version.stderr | ne "") (.kubectl_install_version.stdout | contains (printf "GitVersion:\"%s\"" .kube_version) | not)
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubectl
+ dest: /usr/local/bin/kubectl
+ mode: 0755
+
+- name: Check if kubelet is installed
+ ignore_errors: true
+ command: kubelet --version
+ register: kubelet_install_version
+- name: Install kubelet
+ when: or (.kubelet_install_version.stderr | ne "") (.kubelet_install_version.stdout | ne (printf "Kubernetes %s" .kube_version))
+ block:
+ - name: Sync kubelet to remote
+ copy:
+ src: |
+ {{ .work_dir }}/kubekey/kube/{{ .kube_version }}/{{ .binary_type.stdout }}/kubelet
+ dest: /usr/local/bin/kubelet
+ mode: 0755
+ - name: Sync kubelet env to remote
+ template:
+ src: kubeadm/kubelet.env
+ dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
+ - name: Sync kubelet service to remote
+ copy:
+ src: kubelet.service
+ dest: /etc/systemd/system/kubelet.service
+ - name: Register kubelet service
+ command: systemctl daemon-reload && systemctl enable kubelet.service
diff --git a/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml b/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml
deleted file mode 100644
index 167a53f08..000000000
--- a/builtin/roles/install/kubernetes/tasks/install_kube_binaries.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-- name: Check if helm is installed
- ignore_errors: true
- command: helm version
- register: helm_install_version
-
-- name: Sync helm to remote
- copy:
- src: "{{ work_dir }}/kubekey/helm/{{ helm_version }}/{{ binary_type.stdout }}/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- dest: "/tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz"
- when: helm_install_version.stderr != ""
-
-- name: Install helm
- command: |
- tar --strip-components=1 -zxvf /tmp/kubekey/helm-{{ helm_version }}-linux-{{ binary_type.stdout }}.tar.gz -C /usr/local/bin linux-{{ binary_type.stdout }}/helm
- when: helm_install_version.stderr != ""
-
-- name: Check if kubeadm is installed
- ignore_errors: true
- command: kubeadm version
- register: kubeadm_install_version
-
-- name: Sync kubeadm to remote
- copy:
- src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubeadm"
- dest: "/usr/local/bin/kubeadm"
- mode: 0755
- when: kubeadm_install_version.stderr != ""
-
-- name: Check if kubectl is installed
- ignore_errors: true
- command: kubectl version
- register: kubectl_install_version
-
-- name: Sync kubectl to remote
- copy:
- src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubectl"
- dest: "/usr/local/bin/kubectl"
- mode: 0755
- when: kubectl_install_version.stderr != ""
-
-
-- name: Check if kubelet is installed
- ignore_errors: true
- command: systemctl status kubelet
- register: kubelet_install_version
-
-- name: Sync kubelet to remote
- copy:
- src: "{{ work_dir }}/kubekey/kube/{{ kube_version }}/{{ binary_type.stdout }}/kubelet"
- dest: "/usr/local/bin/kubelet"
- mode: 0755
- when: kubelet_install_version.stderr != ""
-
-- name: Sync kubelet env to remote
- template:
- src: "kubeadm/kubelet.env"
- dest: "/etc/systemd/system/kubelet.service.d/10-kubeadm.conf"
- when: kubelet_install_version.stderr != ""
-
-- name: Sync kubelet service to remote
- copy:
- src: "kubelet.service"
- dest: "/etc/systemd/system/kubelet.service"
- when: kubelet_install_version.stderr != ""
-
-- name: Register kubelet service
- command: systemctl daemon-reload && systemctl enable kubelet.service
- when: kubelet_install_version.stderr != ""
diff --git a/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
index fbbe30de1..827f6c875 100644
--- a/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
+++ b/builtin/roles/install/kubernetes/tasks/join_kubernetes.yaml
@@ -1,15 +1,19 @@
---
- name: Generate kubeadm join config
template:
- src: kubeadm/{% if (kube_version|version:">=v1.24.0") %}kubeadm-join.v1beta3{% else %}kubeadm-join.v1beta2{% endif %}
+ src: |
+ {{- if .kube_version | semverCompare ">=v1.24.0" -}}
+ kubeadm/kubeadm-join.v1beta3
+ {{- else -}}
+ kubeadm/kubeadm-join.v1beta2
+ {{- end -}}
dest: /etc/kubernetes/kubeadm-config.yaml
- name: Sync audit policy file to remote
copy:
- src: "audit"
- dest: "/etc/kubernetes/audit/"
- when:
- - kubernetes.audit
+ src: audit
+ dest: /etc/kubernetes/audit/
+ when: .kubernetes.audit
- name: Join kubernetes cluster
block:
@@ -18,21 +22,22 @@
/usr/local/bin/kubeadm join --config=/etc/kubernetes/kubeadm-config.yaml --ignore-preflight-errors=FileExisting-crictl,ImagePull
rescue:
- name: Reset kubeadm if join failed
- command: kubeadm reset -f {% if (cri.cri_socket|defined && cri.cri_socket != "") %}--cri-socket {{ cri.cri_socket }}{% endif %}
+ command: kubeadm reset -f {{ if and .cri.cri_socket (ne .cri.cri_socket "") }}--cri-socket {{ .cri.cri_socket }}{{ end }}
- name: Sync kubeconfig to remote
copy:
- src: "{{ work_dir }}/kubekey/kubeconfig"
+ src: |
+ {{ .work_dir }}/kubekey/kubeconfig
dest: /root/.kube/config
- name: Set to worker node
- when: inventory_name in groups["kube_worker"]
+ when: .groups.kube_worker | default list | has .inventory_name
block:
- name: Remote master taint
ignore_errors: true
command: |
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
- /usr/local/bin/kubectl taint nodes {{ inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/master=:NoSchedule-
+ /usr/local/bin/kubectl taint nodes {{ .inventory_name }} node-role.kubernetes.io/control-plane=:NoSchedule-
- name: Add work label
command: |
- /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} node-role.kubernetes.io/worker=
+ /usr/local/bin/kubectl label --overwrite node {{ .inventory_name }} node-role.kubernetes.io/worker=
diff --git a/builtin/roles/install/kubernetes/tasks/main.yaml b/builtin/roles/install/kubernetes/tasks/main.yaml
index a7003df25..7c1c348ad 100644
--- a/builtin/roles/install/kubernetes/tasks/main.yaml
+++ b/builtin/roles/install/kubernetes/tasks/main.yaml
@@ -1,31 +1,33 @@
---
- name: Check kubernetes if installed
ignore_errors: true
- command: kubectl get node --field-selector metadata.name={{ inventory_name }}
+ command: kubectl get node --field-selector metadata.name={{ .inventory_name }}
register: kube_node_info_important
-- include_tasks: install_kube_binaries.yaml
+- include_tasks: install_binaries.yaml
- include_tasks: deploy_kube_vip.yaml
when:
- - kubernetes.kube_vip.enabled
- - inventory_name in groups['kube_control_plane']
+ - .kubernetes.kube_vip.enabled
+ - .groups.kube_control_plane | default list | has .inventory_name
- name: Select init kubernetes node
run_once: true
set_fact:
- init_kubernetes_node: "{{ groups['kube_control_plane']|first }}"
+ init_kubernetes_node: |
+ {{ .groups.kube_control_plane | default list | first }}
- name: Init kubernetes
- when: inventory_name == init_kubernetes_node
+ when: eq .inventory_name .init_kubernetes_node
block:
- include_tasks: init_kubernetes.yaml
- when: kube_node_info_important.stderr != ""
+ when: .kube_node_info_important.stderr | ne ""
- include_tasks: deploy_cluster_dns.yaml
- name: Fetch kubeconfig to local
fetch:
src: /etc/kubernetes/admin.conf
- dest: "{{ work_dir }}/kubekey/kubeconfig"
+ dest: |
+ {{ .work_dir }}/kubekey/kubeconfig
- name: Generate certificate key by kubeadm
command: |
/usr/local/bin/kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-config.yaml 2>&1 \
@@ -33,7 +35,8 @@
register: kubeadm_cert_result
- name: Set_Fact certificate key to all hosts
set_fact:
- kubeadm_cert: "{{ kubeadm_cert_result.stdout }}"
+ kubeadm_cert: |
+ {{ .kubeadm_cert_result.stdout }}
- name: Generate kubeadm token
block:
- name: Generate token by kubeadm
@@ -41,24 +44,26 @@
register: kubeadm_token_result
- name: Set_Fact token to all hosts
set_fact:
- kubeadm_token: "{{ kubeadm_token_result.stdout }}"
+ kubeadm_token: |
+ {{ .kubeadm_token_result.stdout }}
- name: Set_Fact init endpoint
set_fact:
- init_kubernetes_endpoint: "{{ inventory_name }}"
+ init_kubernetes_endpoint: |
+ {{ .inventory_name }}
- include_tasks: join_kubernetes.yaml
when:
- - kube_node_info_important.stderr != ""
- - inventory_name != init_kubernetes_node
+ - .kube_node_info_important.stderr | ne ""
+ - ne .inventory_name .init_kubernetes_node
- include_tasks: deploy_haproxy.yaml
when:
- - kubernetes.haproxy.enabled
- - inventory_name in groups['kube_worker']
+ - .kubernetes.haproxy.enabled
+ - .groups.kube_worker | default list | has .inventory_name
- name: Add custom label to cluster
command: |
- {% for k,v in kubernetes.custom_label %}
- /usr/local/bin/kubectl label --overwrite node {{ inventory_name }} {{ k }}={{ v }}
- {% endfor %}
- when: kubernetes.custom_label | length > 0
+ {{- range $k, $v := .kubernetes.custom_label -}}
+ /usr/local/bin/kubectl label --overwrite node {{ $.inventory_name }} {{ $k }}={{ $v }}
+ {{- end -}}
+ when: .kubernetes.custom_label | len | lt 0
diff --git a/builtin/roles/install/kubernetes/templates/dns/coredns.deployment b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
index 193e48375..990e41b91 100644
--- a/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
+++ b/builtin/roles/install/kubernetes/templates/dns/coredns.deployment
@@ -47,7 +47,7 @@ metadata:
prometheus.io/scrape: "true"
createdby: 'kubekey'
spec:
- clusterIP: {{ kubernetes.networking.dns_service_ip }}
+ clusterIP: {{ .kubernetes.networking.dns_service_ip }}
selector:
k8s-app: kube-dns
ports:
@@ -119,7 +119,7 @@ spec:
- ""
containers:
- name: coredns
- image: "{{ kubernetes.networking.dns_image }}"
+ image: "{{ .kubernetes.networking.dns_image }}"
imagePullPolicy: IfNotPresent
resources:
# TODO: Set memory limits when we've profiled the container for large
@@ -185,58 +185,78 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
-{% for ez in kubernetes.coredns.zone_configs %}
- {{ ez.zones|join:" " }} {
- cache {{ ez.cache }}
-{% for c in ez.additional_configs %}
- {{ c }}
-{% endfor %}
+ {{- range .kubernetes.coredns.zone_configs }}
+ {{ .zones | join " " }} {
+ cache {{ .cache }}
+ {{- range .additional_configs }}
+ {{ . }}
+ {{- end }}
-{% for r in ez.rewrite %}
- rewrite {{ r.rule }} {
- {{ r.field }} {{ r.type }} {{ r.value }}
- {{ r.options }}
+ {{- range .rewrite }}
+ rewrite {{ .rule }} {
+ {{ .field }} {{ .type }} {{ .value }}
+ {{ .options }}
}
-{% endfor %}
+ {{- end }}
health {
lameduck 5s
}
-{% if (ez.kubernetes.zones|defined) %}
- kubernetes {{ ez.kubernetes.zones|join:" " }} in-addr.arpa ip6.arpa {
+ {{- if .kubernetes.zones | len | lt 0 }}
+ kubernetes {{ .kubernetes.zones | join " " }} in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
-{% endif %}
+ {{- end }}
-{% for f in ez.forward %}
- forward {{ f.from }} {{ f.to|join:" " }} {
-{% if (f.except|length > 0) %} except {{ f.except|join:" " }}{% endif %}
-{% if (f.force_tcp) %} force_tcp{% endif %}
-{% if (f.prefer_udp) %} prefer_udp{% endif %}
-{% if (f.max_fails|defined) %} max_fails {{ f.max_fails }}{% endif %}
-{% if (f.expire|defined) %} expire {{ f.expire }}{% endif %}
-{% if (f.tls|defined) %} tls {{ f.tls.cert_file }} {{ f.tls.key_file }} {{ f.tls.ca_file }}{% endif %}
-{% if (f.tls_servername|defined) %} tls_servername {{ f.tls_servername }}{% endif %}
-{% if (f.policy|defined) %} policy {{ f.policy }}{% endif %}
-{% if (f.health_check|defined) %} health_check {{ f.health_check }}{% endif %}
-{% if (f.max_concurrent|defined) %} max_concurrent {{ f.max_concurrent }}{% endif %}
+ {{- range .forward }}
+ forward {{ .from }} {{ .to | join " " }} {
+ {{- if .except | len | lt 0 }}
+ except {{ .except | join " " }}
+ {{- end }}
+ {{- if .force_tcp }}
+ force_tcp
+ {{- end }}
+ {{- if .prefer_udp }}
+ prefer_udp
+ {{- end }}
+ {{- if .max_fails }}
+ max_fails {{ .max_fails }}
+ {{- end }}
+ {{- if .expire }}
+ expire {{ .expire }}
+ {{- end }}
+ {{- if .tls }}
+ tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
+ {{- end }}
+ {{- if .tls_servername }}
+ tls_servername {{ .tls_servername }}
+ {{- end }}
+ {{- if .policy }}
+ policy {{ .policy }}
+ {{- end }}
+ {{- if .health_check }}
+ health_check {{ .health_check }}
+ {{- end }}
+ {{- if .max_concurrent }}
+ max_concurrent {{ .max_concurrent }}
+ {{- end }}
}
-{% endfor %}
+ {{- end }}
-{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+    {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts /etc/coredns/hosts {
fallthrough
}
-{% endif %}
+ {{- end }}
}
-{% endfor %}
+ {{- end }}
-{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
-{% for h in kubernetes.coredns.dns_etc_hosts %}
- {{ h }}
-{% endfor %}
-{% endif %}
+ {{- range .kubernetes.coredns.dns_etc_hosts }}
+    {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
index 6fa29496e..59fc01ca6 100644
--- a/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
+++ b/builtin/roles/install/kubernetes/templates/dns/nodelocaldns.daemonset
@@ -43,7 +43,7 @@ spec:
operator: "Exists"
containers:
- name: node-cache
- image: {{ kubernetes.networking.dns_cache_image }}
+ image: {{ .kubernetes.networking.dns_cache_image }}
resources:
limits:
memory: 200Mi
@@ -112,118 +112,118 @@ metadata:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
-{% for ez in kubernetes.coredns.external_zones %}
-{{ ez.zones|join:" " }}{
- log
- errors
- loadbalance
- cache {{ ez.cache }}
- reload
- loop
- bind 169.254.25.10
- prometheus :9253
+ {{- range .kubernetes.coredns.external_zones }}
+ {{ .zones | join " " }}{
+ log
+ errors
+ loadbalance
+      cache {{ .cache }}
+ reload
+ loop
+ bind 169.254.25.10
+ prometheus :9253
-{% for r in ez.rewrite %}
- rewrite {{ r.rule }} {
- {{ r.field }} {{ r.type }} {{ r.value }}
- {{ r.options }}
- }
-{% endfor %}
+ {{- range .rewrite }}
+ rewrite {{ .rule }} {
+ {{ .field }} {{ .type }} {{ .value }}
+ {{ .options }}
+ }
+ {{- end }}
-{% for f in ez.forward %}
- forward {{ f.from }} {{ f.to|join:" " }} {
-{% if (f.except|length > 0) %}
- except {{ f.except|join:" " }}
-{% endif %}
-{% if (f.force_tcp) %}
- force_tcp
-{% endif %}
-{% if (f.prefer_udp) %}
- prefer_udp
-{% endif %}
- max_fails {{ f.max_fails|default_if_none:2 }}
- expire {{ f.expire|default_if_none:"10s" }}
-{% if (f.tls|defined) %}
- tls {{ f.tls.cert_file|default_if_none:'""' }} {{ f.tls.key_file|default_if_none:'""' }} {{ f.tls.ca_file|default_if_none:'""' }}
-{% endif %}
-{% if (f.tls_servername|defined) %}
- tls_servername {{ f.tls_servername }}
-{% endif %}
-{% if (f.policy|defined) %}
- policy {{ f.policy }}
-{% endif %}
-{% if (f.health_check|defined) %}
- health_check {{ f.health_check }}
-{% endif %}
-{% if (f.max_concurrent|defined) %}
- max_concurrent {{ f.max_concurrent }}
-{% endif %}
- }
-{% endfor %}
+ {{- range .forward }}
+ forward {{ .from }} {{ .to | join " " }} {
+ {{- if .except | len | lt 0 }}
+ except {{ .except | join " " }}
+ {{- end }}
+ {{- if .force_tcp }}
+ force_tcp
+ {{- end }}
+        {{- if .prefer_udp }}
+ prefer_udp
+ {{- end }}
+ max_fails {{ .max_fails | default 2 }}
+ expire {{ .expire | default "10s" }}
+ {{- if .tls }}
+        tls {{ .tls.cert_file }} {{ .tls.key_file }} {{ .tls.ca_file }}
+ {{- end }}
+ {{- if .tls_servername }}
+ tls_servername {{ .tls_servername }}
+ {{- end }}
+ {{- if .policy }}
+ policy {{ .policy }}
+ {{- end }}
+ {{- if .health_check }}
+ health_check {{ .health_check }}
+ {{- end }}
+ {{- if .max_concurrent }}
+ max_concurrent {{ .max_concurrent }}
+ {{- end }}
+ }
+ {{- end }}
-{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
- hosts /etc/coredns/hosts {
- fallthrough
- }
-{% endif %}
+ {{- if $.kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts /etc/coredns/hosts {
+ fallthrough
+ }
+ {{- end }}
}
-{% endfor %}
+ {{- end }}
-
- {{ kubernetes.networking.dns_domain }}:53 {
- errors
- cache {
- success 9984 30
- denial 9984 5
- }
- reload
- loop
- bind 169.254.25.10
- forward . {{ kubernetes.networking.dns_service_ip }} {
- force_tcp
- }
- prometheus :9253
- health 169.254.25.10:9254
+ {{ .kubernetes.networking.dns_domain }}:53 {
+ errors
+ cache {
+ success 9984 30
+ denial 9984 5
+ }
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
+ health 169.254.25.10:9254
}
in-addr.arpa:53 {
- errors
- cache 30
- reload
- loop
- bind 169.254.25.10
- forward . {{ kubernetes.networking.dns_service_ip }} {
- force_tcp
- }
- prometheus :9253
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
}
ip6.arpa:53 {
- errors
- cache 30
- reload
- loop
- bind 169.254.25.10
- forward . {{ kubernetes.networking.dns_service_ip }} {
- force_tcp
- }
- prometheus :9253
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . {{ .kubernetes.networking.dns_service_ip }} {
+ force_tcp
+ }
+ prometheus :9253
}
.:53 {
- errors
- cache 30
- reload
- loop
- bind 169.254.25.10
- forward . /etc/resolv.conf
- prometheus :9253
-{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
- hosts /etc/coredns/hosts {
- fallthrough
- }
-{% endif %}
+ errors
+ cache 30
+ reload
+ loop
+ bind 169.254.25.10
+ forward . /etc/resolv.conf
+ prometheus :9253
+ {{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
+ hosts /etc/coredns/hosts {
+ fallthrough
+ }
+ {{- end }}
}
-{% if (kubernetes.coredns.dns_etc_hosts|length > 0) %}
+
+{{- if .kubernetes.coredns.dns_etc_hosts | len | lt 0 }}
hosts: |
-{% for h in kubernetes.coredns.dns_etc_hosts %}
- {{ h }}
-{% endfor %}
-{% endif %}
+ {{- range .kubernetes.coredns.dns_etc_hosts }}
+ {{ . }}
+ {{- end }}
+{{- end }}
diff --git a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
index b2a001550..13a982d99 100644
--- a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
+++ b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.cfg
@@ -12,7 +12,7 @@ defaults
retries 5
timeout http-request 5m
timeout queue 5m
- timeout connect 30s
+    timeout connect         30s
timeout client 30s
timeout server 15m
timeout http-keep-alive 30s
@@ -20,12 +20,12 @@ defaults
maxconn 4000
frontend healthz
- bind *:{{ kubernetes.haproxy.health_port }}
+ bind *:{{ .kubernetes.haproxy.health_port }}
mode http
monitor-uri /healthz
frontend kube_api_frontend
- bind 127.0.0.1:{{ kubernetes.apiserver.port }}
+ bind 127.0.0.1:{{ .kubernetes.apiserver.port }}
mode tcp
option tcplog
default_backend kube_api_backend
@@ -36,6 +36,6 @@ backend kube_api_backend
default-server inter 15s downinter 15s rise 2 fall 2 slowstart 60s maxconn 1000 maxqueue 256 weight 100
option httpchk GET /healthz
http-check expect status 200
-{%for h in groups['kube_control_plane'] %}
- server {{ h.inventory_name }} {{ h.internal_ipv4 }}:{{ kubernetes.apiserver.port }} check check-ssl verify none
-{% endfor %}
+{{- range .groups.kube_control_plane | default list }}
+ server {{ index $.inventory_hosts . "inventory_name" }} {{ index $.inventory_hosts . "internal_ipv4" }}:{{ $.kubernetes.apiserver.port }} check check-ssl verify none
+{{- end }}
diff --git a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
index b50e3eedf..d7b173fa9 100644
--- a/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
+++ b/builtin/roles/install/kubernetes/templates/haproxy/haproxy.yaml
@@ -8,7 +8,7 @@ metadata:
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: kube-haproxy
annotations:
- cfg-checksum: "{{ cfg_md5.stdout }}"
+ cfg-checksum: "{{ .cfg_md5.stdout }}"
spec:
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@@ -17,7 +17,7 @@ spec:
priorityClassName: system-node-critical
containers:
- name: haproxy
- image: {{ kubernetes.haproxy.image }}
+ image: {{ .kubernetes.haproxy.image }}
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -26,11 +26,11 @@ spec:
livenessProbe:
httpGet:
path: /healthz
- port: {{ kubernetes.haproxy.health_port }}
+ port: {{ .kubernetes.haproxy.health_port }}
readinessProbe:
httpGet:
path: /healthz
- port: {{ kubernetes.haproxy.health_port }}
+ port: {{ .kubernetes.haproxy.health_port }}
volumeMounts:
- mountPath: /usr/local/etc/haproxy/
name: etc-haproxy
diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
index f4f8ce5ba..a1eefcc8a 100644
--- a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
+++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta2
@@ -2,42 +2,40 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
etcd:
-{% if (kubernetes.etcd.deployment_type=='internal') %}
+{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
- {% set etcd_image_info=kubernetes.etcd.image|split:":" %}
- imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
- imageTag: {{ etcd_image_info[1] }}
+ imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
serverCertSANs:
- {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
- {% endfor %}
-{% else %}
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
+{{- else }}
external:
endpoints:
- {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
- {% endfor %}
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
caFile: /etc/kubernetes/pki/etcd/ca.crt
certFile: /etc/kubernetes/pki/etcd/client.crt
keyFile: /etc/kubernetes/pki/etcd/client.key
-{% endif %}
+{{- end }}
dns:
type: CoreDNS
- {% set core_image_info=kubernetes.networking.dns_image|split:":" %}
- imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
- imageTag: {{ core_image_info[1] }}
-imageRepository: {{ kubernetes.image_repository }}
-kubernetesVersion: {{ kube_version }}
+ imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
+imageRepository: {{ .kubernetes.image_repository }}
+kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki
-clusterName: {{ kubernetes.cluster_name }}
-controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
+clusterName: {{ .kubernetes.cluster_name }}
+controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
networking:
- dnsDomain: {{ kubernetes.networking.dns_domain }}
- podSubnet: {{ kubernetes.networking.pod_cidr }}
- serviceSubnet: {{ kubernetes.networking.service_cidr }}
+ dnsDomain: {{ .kubernetes.networking.dns_domain }}
+ podSubnet: {{ .kubernetes.networking.pod_cidr }}
+ serviceSubnet: {{ .kubernetes.networking.service_cidr }}
apiServer:
extraArgs:
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
authorization-mode: Node,RBAC
enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
profiling: false
@@ -45,62 +43,64 @@ apiServer:
service-account-lookup: true
tls-min-version: VersionTLS12
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
-{% endif %}
-{% if (kubernetes.audit) %}
+{{- end }}
+{{- if .kubernetes.audit }}
audit-log-format: json
audit-log-maxbackup: 2
audit-log-maxsize: 200
audit-policy-file: /etc/kubernetes/audit/policy.yaml
audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
-{% endif %}
-{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
certSANs:
- kubernetes
- kubernetes.default
- kubernetes.default.svc
- localhost
- 127.0.0.1
- - {{ kubernetes.networking.service_cidr|ip_range:0 }}
- - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
- - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %}
- - {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }}
- {% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %}
- - {{ h }}.{{ kubernetes.networking.dns_domain }}
- - {{ hv.internal_ipv4 }}
- {% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %}
- {% endfor %}
- {% for h in kubernetes.apiserver.certSANs %}
- - {{ h }}
- {% endfor %}
-{% if (kubernetes.audit) %}
+ - {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
+ - {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
+ {{- range .groups.k8s_cluster | default list }}
+ - {{ . }}.{{ $.kubernetes.networking.dns_domain }}
+ - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- if index $.inventory_hosts . "internal_ipv6" }}
+ - {{ index $.inventory_hosts . "internal_ipv6" }}
+ {{- end }}
+ {{- end }}
+ {{- range .kubernetes.apiserver.certSANs }}
+ - {{ . }}
+ {{- end }}
+{{- if .kubernetes.audit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
-{% endif %}
+{{- end }}
controllerManager:
extraArgs:
-{% if (internal_ipv6|defined) %}
- node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- if and .internal_ipv6 (ne .internal_ipv6 "") }}
+ node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
node-cidr-mask-size-ipv6: "64"
-{% else %}
- node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
-{% endif %}
-{% if (kube_version|version:'>=v1.9.0') %}
+{{- else }}
+ node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- end }}
+{{- if .kube_version | semverCompare ">=v1.9.0" }}
cluster-signing-duration: 87600h
-{% else %}
+{{- else }}
experimental-cluster-signing-duration: 87600h
-{% endif %}
-{% if (security_enhancement) %}
+{{- end }}
+{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
terminated-pod-gc-threshold: 50
use-service-account-credentials: true
-{% else %}
+{{- else }}
bind-address: 0.0.0.0
-{% endif %}
-{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
extraVolumes:
- name: host-time
hostPath: /etc/localtime
@@ -108,43 +108,40 @@ controllerManager:
readOnly: true
scheduler:
extraArgs:
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
-{% else %}
+{{- else }}
bind-address: 0.0.0.0
-{% endif %}
-{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
---
-
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
- advertiseAddress: {{ internal_ipv4 }}
- bindPort: {{ kubernetes.apiserver.port }}
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
nodeRegistration:
- criSocket: {{ cri.cri_socket }}
+ criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
- cgroup-driver: {{ cri.cgroup_driver }}
+ cgroup-driver: {{ .cri.cgroup_driver }}
---
-
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
-clusterCIDR: {{ kubernetes.networking.pod_cidr }}
-mode: {{ kubernetes.kube_proxy.mode }}
-{{ kubernetes.kube_proxy.config|to_yaml|safe }}
+clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
+mode: {{ .kubernetes.kube_proxy.mode }}
+{{ .kubernetes.kube_proxy.config | toYaml }}
---
-
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
-clusterDomain: {{ kubernetes.networking.dns_domain }}
+clusterDomain: {{ .kubernetes.networking.dns_domain }}
clusterDNS:
- - {{ kubernetes.networking.dns_service_ip }}
-maxPods: {{ kubernetes.max_pods }}
-podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }}
+ - {{ .kubernetes.networking.dns_service_ip }}
+maxPods: {{ .kubernetes.max_pods }}
+podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
rotateCertificates: true
kubeReserved:
cpu: 200m
@@ -161,8 +158,7 @@ evictionSoftGracePeriod:
memory.available: 2m
evictionMaxPodGracePeriod: 120
evictionPressureTransitionPeriod: 30s
-
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
readOnlyPort: 0
protectKernelDefaults: true
eventRecordQPS: 1
@@ -175,25 +171,30 @@ tlsCipherSuites:
featureGates:
RotateKubeletServerCertificate: true
SeccompDefault: true
-{% if (kube_version|version:">=v1.24.0") %}
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
-{% endif %}
-{% if (kube_version|version:">=v1.21.0") %}
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
-{% endif %}
-{{ features|to_yaml:2|safe }}
-{% else %}
+ {{- end }}
+{{ if .kubernetes.kubelet.feature_gates }}{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}{{ end }}
+{{- else }}
featureGates:
RotateKubeletServerCertificate: true
-{% if (kube_version|version:">=v1.24.0") %}
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
-{% endif %}
-{% if (kube_version|version:">=v1.21.0") %}
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
ExpandCSIVolumes: true
-{% endif %}
-{{ features|to_yaml:2|safe }}
-{% endif %}
-cgroupDriver: {{ cri.cgroup_driver }}
-containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }}
-containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }}
+ {{- end }}
+ {{- if .kubernetes.kubelet.feature_gates }}
+{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}
+ {{- end }}
+{{- end }}
+cgroupDriver: {{ .cri.cgroup_driver }}
+containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
+containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
+{{- if .kubernetes.kubelet.extra_args }}
+{{ .kubernetes.kubelet.extra_args | toYaml }}
+{{- end }}
diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
index 38b82b15a..d9aa64397 100644
--- a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
+++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-init.v1beta3
@@ -2,41 +2,39 @@
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
etcd:
-{% if (kubernetes.etcd.deployment_type=='internal') %}
+{{- if .kubernetes.etcd.deployment_type | eq "internal" }}
local:
- {% set etcd_image_info=kubernetes.etcd.image|split:":" %}
- imageRepository: {{ etcd_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
- imageTag: {{ etcd_image_info[1] }}
+ imageRepository: {{ slice (.kubernetes.etcd.image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.etcd.image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.etcd.image | splitList ":" | last }}
serverCertSANs:
- {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
- {% endfor %}
-{% else %}
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
+{{- else }}
external:
endpoints:
- {% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}
- - {{ hv.internal_ipv4|stringformat:"https://%s:2379" }}
- {% endfor %}
+ {{- range .groups.etcd | default list }}
+ - https://{{ index $.inventory_hosts . "internal_ipv4" }}:2379
+ {{- end }}
caFile: /etc/kubernetes/pki/etcd/ca.crt
certFile: /etc/kubernetes/pki/etcd/client.crt
keyFile: /etc/kubernetes/pki/etcd/client.key
-{% endif %}
+{{- end }}
dns:
- {% set core_image_info=kubernetes.networking.dns_image|split:":" %}
- imageRepository: {{ core_image_info[0]|split:"/"|slice:":-1"|join:"/"|safe }}
- imageTag: {{ core_image_info[1] }}
-imageRepository: {{ kubernetes.image_repository }}
-kubernetesVersion: {{ kube_version }}
+ imageRepository: {{ slice (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/") 0 (sub (.kubernetes.networking.dns_image | splitList ":" | first | splitList "/" | len) 1) | join "/" }}
+ imageTag: {{ .kubernetes.networking.dns_image | splitList ":" | last }}
+imageRepository: {{ .kubernetes.image_repository }}
+kubernetesVersion: {{ .kube_version }}
certificatesDir: /etc/kubernetes/pki
-clusterName: {{ kubernetes.cluster_name }}
-controlPlaneEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
+clusterName: {{ .kubernetes.cluster_name }}
+controlPlaneEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
networking:
- dnsDomain: {{ kubernetes.networking.dns_domain }}
- podSubnet: {{ kubernetes.networking.pod_cidr }}
- serviceSubnet: {{ kubernetes.networking.service_cidr }}
+ dnsDomain: {{ .kubernetes.networking.dns_domain }}
+ podSubnet: {{ .kubernetes.networking.pod_cidr }}
+ serviceSubnet: {{ .kubernetes.networking.service_cidr }}
apiServer:
extraArgs:
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
authorization-mode: Node,RBAC
enable-admission-plugins: AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity
profiling: false
@@ -44,62 +42,64 @@ apiServer:
service-account-lookup: true
tls-min-version: VersionTLS12
tls-cipher-suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
-{% endif %}
-{% if (kubernetes.audit) %}
+{{- end }}
+{{- if .kubernetes.audit }}
audit-log-format: json
audit-log-maxbackup: 2
audit-log-maxsize: 200
audit-policy-file: /etc/kubernetes/audit/policy.yaml
audit-webhook-config-file: /etc/kubernetes/audit/webhook.yaml
-{% endif %}
-{{ kubernetes.apiserver.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.apiserver.extra_args | toYaml | indent 4 }}
certSANs:
- kubernetes
- kubernetes.default
- kubernetes.default.svc
- localhost
- 127.0.0.1
- - {{ kubernetes.networking.service_cidr|ip_range:0 }}
- - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}
- - {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint|stringformat:"kubernetes.default.svc.%s" }}{% else %}{{ init_kubernetes_node|stringformat:"kubernetes.default.svc.%s" }}{% endif %}
- - {{ kubernetes.networking.dns_domain|stringformat:"kubernetes.default.svc.%s" }}
- {% for h in groups['k8s_cluster'] %}{% set hv=inventory_hosts[h] %}
- - {{ h }}.{{ kubernetes.networking.dns_domain }}
- - {{ hv.internal_ipv4 }}
- {% if (hv.internal_ipv6|defined) %}- {{ hv.internal_ipv6 }}{% endif %}
- {% endfor %}
- {% for h in kubernetes.apiserver.certSANs %}
- - {{ h }}
- {% endfor %}
-{% if (kubernetes.audit) %}
+ - {{ .kubernetes.networking.service_cidr | ipInCIDR 0 }}
+ - {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}
+ - kubernetes.default.svc.{{ .kubernetes.networking.dns_domain }}
+ {{- range .groups.k8s_cluster | default list }}
+ - {{ . }}.{{ $.kubernetes.networking.dns_domain }}
+ - {{ index $.inventory_hosts . "internal_ipv4" }}
+ {{- if index $.inventory_hosts . "internal_ipv6" }}
+ - {{ index $.inventory_hosts . "internal_ipv6" }}
+ {{- end }}
+ {{- end }}
+ {{- range .kubernetes.apiserver.certSANs }}
+ - {{ . }}
+ {{- end }}
+{{- if .kubernetes.audit }}
extraVolumes:
- name: k8s-audit
hostPath: /etc/kubernetes/audit
mountPath: /etc/kubernetes/audit
pathType: DirectoryOrCreate
-{% endif %}
+{{- end }}
controllerManager:
extraArgs:
-{% if (internal_ipv6|defined) %}
- node-cidr-mask-size-ipv4: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- if and .internal_ipv6 (ne .internal_ipv6 "") }}
+ node-cidr-mask-size-ipv4: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
node-cidr-mask-size-ipv6: "64"
-{% else %}
- node-cidr-mask-size: "{{ kubernetes.controller_manager.kube_network_node_prefix }}"
-{% endif %}
-{% if (kube_version|version:'>=v1.9.0') %}
+{{- else }}
+ node-cidr-mask-size: "{{ .kubernetes.controller_manager.kube_network_node_prefix }}"
+{{- end }}
+{{- if .kube_version | semverCompare ">=v1.9.0" }}
cluster-signing-duration: 87600h
-{% else %}
+{{- else }}
experimental-cluster-signing-duration: 87600h
-{% endif %}
-{% if (security_enhancement) %}
+{{- end }}
+{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
terminated-pod-gc-threshold: 50
use-service-account-credentials: true
-{% else %}
+{{- else }}
bind-address: 0.0.0.0
-{% endif %}
-{{ kubernetes.controller_manager.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.controller_manager.extra_args | toYaml | indent 4 }}
extraVolumes:
- name: host-time
hostPath: /etc/localtime
@@ -107,43 +107,40 @@ controllerManager:
readOnly: true
scheduler:
extraArgs:
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
bind-address: 127.0.0.1
profiling: false
-{% else %}
+{{- else }}
bind-address: 0.0.0.0
-{% endif %}
-{{ kubernetes.scheduler.extra_args|to_yaml:4|safe }}
+{{- end }}
+{{ .kubernetes.scheduler.extra_args | toYaml | indent 4 }}
---
-
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
- advertiseAddress: {{ internal_ipv4 }}
- bindPort: {{ kubernetes.apiserver.port }}
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
nodeRegistration:
- criSocket: {{ cri.cri_socket }}
+ criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
- cgroup-driver: {{ cri.cgroup_driver }}
+ cgroup-driver: {{ .cri.cgroup_driver }}
---
-
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
-clusterCIDR: {{ kubernetes.networking.pod_cidr }}
-mode: {{ kubernetes.kube_proxy.mode }}
-{{ kubernetes.kube_proxy.config|to_yaml|safe }}
+clusterCIDR: {{ .kubernetes.networking.pod_cidr }}
+mode: {{ .kubernetes.kube_proxy.mode }}
+{{ .kubernetes.kube_proxy.config | toYaml }}
---
-
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
-clusterDomain: {{ kubernetes.networking.dns_domain }}
+clusterDomain: {{ .kubernetes.networking.dns_domain }}
clusterDNS:
- - {{ kubernetes.networking.dns_service_ip }}
-maxPods: {{ kubernetes.max_pods }}
-podPidsLimit: {{ kubernetes.kubelet.pod_pids_limit }}
+ - {{ .kubernetes.networking.dns_service_ip }}
+maxPods: {{ .kubernetes.max_pods }}
+podPidsLimit: {{ .kubernetes.kubelet.pod_pids_limit }}
rotateCertificates: true
kubeReserved:
cpu: 200m
@@ -160,8 +157,7 @@ evictionSoftGracePeriod:
memory.available: 2m
evictionMaxPodGracePeriod: 120
evictionPressureTransitionPeriod: 30s
-
-{% if (security_enhancement) %}
+{{- if .security_enhancement }}
readOnlyPort: 0
protectKernelDefaults: true
eventRecordQPS: 1
@@ -174,25 +170,26 @@ tlsCipherSuites:
featureGates:
RotateKubeletServerCertificate: true
SeccompDefault: true
-{% if (kube_version|version:">=v1.24.0") %}
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
-{% endif %}
-{% if (kube_version|version:">=v1.21.0") %}
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
-{% endif %}
-{{ features|to_yaml:2|safe }}
-{% else %}
+ {{- end }}
+{{ if .kubernetes.kubelet.feature_gates }}{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}{{ end }}
+{{- else }}
featureGates:
RotateKubeletServerCertificate: true
-{% if (kube_version|version:">=v1.24.0") %}
+ {{- if .kube_version | semverCompare ">=v1.24.0" }}
TTLAfterFinished: true
-{% endif %}
-{% if (kube_version|version:">=v1.21.0") %}
+ {{- end }}
+ {{- if .kube_version | semverCompare ">=v1.21.0" }}
CSIStorageCapacity: true
ExpandCSIVolumes: true
-{% endif %}
-{{ features|to_yaml:2|safe }}
-{% endif %}
-cgroupDriver: {{ cri.cgroup_driver }}
-containerLogMaxSize: {{ kubernetes.kubelet.container_log_max_size }}
-containerLogMaxFiles: {{ kubernetes.kubelet.container_log_max_files }}
+ {{- end }}
+{{ if .kubernetes.kubelet.feature_gates }}{{ .kubernetes.kubelet.feature_gates | toYaml | indent 2 }}{{ end }}
+{{- end }}
+cgroupDriver: {{ .cri.cgroup_driver }}
+containerLogMaxSize: {{ .kubernetes.kubelet.container_log_max_size }}
+containerLogMaxFiles: {{ .kubernetes.kubelet.container_log_max_files }}
+{{ if .kubernetes.kubelet.extra_args }}{{ .kubernetes.kubelet.extra_args | toYaml }}{{ end }}
diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
index 72c09291b..2ffa06fd2 100644
--- a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
+++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta2
@@ -3,17 +3,17 @@ apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
discovery:
bootstrapToken:
- apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
- token: "{{ kubeadm_token }}"
+ apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
+ token: "{{ .kubeadm_token }}"
unsafeSkipCAVerification: true
-{% if (inventory_name in groups['kube_control_plane']) %}
+{{- if .groups.kube_control_plane | default list | has .inventory_name }}
controlPlane:
localAPIEndpoint:
- advertiseAddress: {{ internal_ipv4 }}
- bindPort: {{ kubernetes.apiserver.port }}
- certificateKey: {{ kubeadm_cert }}
-{% endif %}
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+ certificateKey: {{ .kubeadm_cert }}
+{{- end }}
nodeRegistration:
- criSocket: {{ cri.cri_socket }}
+ criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
- cgroup-driver: {{ cri.cgroup_driver }}
+ cgroup-driver: {{ .cri.cgroup_driver }}
diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3 b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
index 8aa58601a..20d84f875 100644
--- a/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
+++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubeadm-join.v1beta3
@@ -3,17 +3,17 @@ apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
bootstrapToken:
- apiServerEndpoint: {%if kubernetes.control_plane_endpoint %}{{ kubernetes.control_plane_endpoint }}{% else %}{{ init_kubernetes_node }}{% endif %}:{{ kubernetes.apiserver.port }}
- token: "{{ kubeadm_token }}"
+ apiServerEndpoint: {{ if and .kubernetes.control_plane_endpoint (ne .kubernetes.control_plane_endpoint "") }}{{ .kubernetes.control_plane_endpoint }}{{ else }}{{ .init_kubernetes_node }}{{ end }}:{{ .kubernetes.apiserver.port }}
+ token: "{{ .kubeadm_token }}"
unsafeSkipCAVerification: true
-{% if (inventory_name in groups['kube_control_plane']) %}
+{{- if .groups.kube_control_plane | default list | has .inventory_name }}
controlPlane:
localAPIEndpoint:
- advertiseAddress: {{ internal_ipv4 }}
- bindPort: {{ kubernetes.apiserver.port }}
- certificateKey: {{ kubeadm_cert }}
-{% endif %}
+ advertiseAddress: {{ .internal_ipv4 }}
+ bindPort: {{ .kubernetes.apiserver.port }}
+ certificateKey: {{ .kubeadm_cert }}
+{{- end }}
nodeRegistration:
- criSocket: {{ cri.cri_socket }}
+ criSocket: {{ .cri.cri_socket }}
kubeletExtraArgs:
- cgroup-driver: {{ cri.cgroup_driver }}
+ cgroup-driver: {{ .cri.cgroup_driver }}
diff --git a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
index 8d451f19f..66089b19b 100644
--- a/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
+++ b/builtin/roles/install/kubernetes/templates/kubeadm/kubelet.env
@@ -7,7 +7,7 @@ EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
-Environment="KUBELET_EXTRA_ARGS=--node-ip={{ internal_ipv4 }} --hostname-override={{ inventory_name }} {%for k,v in kubernetes.kubelet.extra_args %}--{{k}} {{v}} {% endfor %}"
+Environment="KUBELET_EXTRA_ARGS=--node-ip={{ .internal_ipv4 }} --hostname-override={{ .inventory_name }} {{ range $k,$v := .kubernetes.kubelet.extra_args }}--{{ $k }} {{ $v }} {{ end }}"
ExecStart=
ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS
diff --git a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
index a9e8a4ca4..f0909ea8c 100644
--- a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
+++ b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.ARP
@@ -15,7 +15,7 @@ spec:
- name: port
value: "6443"
- name: vip_interface
- value: {{ interface.stdout }}
+ value: {{ .interface.stdout }}
- name: vip_cidr
value: "32"
- name: cp_enable
@@ -39,8 +39,8 @@ spec:
- name: lb_port
value: "6443"
- name: address
- value: {{ kubernetes.control_plane_endpoint }}
- image: {{ kubernetes.kubevip.image }}
+ value: {{ .kubernetes.kube_vip.address }}
+ image: {{ .kubernetes.kubevip.image }}
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
diff --git a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
index 31e1bc317..dcbf83fd8 100644
--- a/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
+++ b/builtin/roles/install/kubernetes/templates/kubevip/kubevip.BGP
@@ -15,7 +15,7 @@ spec:
- name: port
value: "6443"
- name: vip_interface
- value: {{ interface.stdout }}
+ value: {{ .interface.stdout }}
- name: vip_cidr
value: "32"
- name: cp_enable
@@ -29,7 +29,12 @@ spec:
- name: bgp_enable
value: "true"
- name: bgp_routerid
- value: {% for h in groups['kube_control_plane'] %}{% set hv=inventory_hosts[h] %}"{{ hv.internal_ipv4 }}"{% if (not forloop.Last) %},{% endif %}{% endfor %}
+ value: |
+ {{ $ips := list }}
+ {{- range .groups.kube_control_plane | default list -}}
+ {{- $ips = append $ips (index $.inventory_hosts . "internal_ipv4") -}}
+ {{- end -}}
+ {{ $ips | join "," }}
- name: bgp_as
value: "65000"
- name: bgp_peeraddress
@@ -37,7 +42,12 @@ spec:
- name: bgp_peeras
value: "65000"
- name: bgp_peers
- value: {{ .BGPPeers }}
+ value: |
+ {{ $ips := list }}
+ {{- range .groups.kube_control_plane | default list -}}
+ {{- $ips = append $ips (printf "%s:65000::false" (index $.inventory_hosts . "internal_ipv4")) -}}
+ {{- end -}}
+ {{ $ips | join "," }}
- name: lb_enable
value: "true"
- name: lb_port
@@ -45,10 +55,10 @@ spec:
- name: lb_fwdmethod
value: local
- name: address
- value: {{ kubernetes.control_plane_endpoint }}
+ value: {{ .kubernetes.kube_vip.address }}
- name: prometheus_server
value: :2112
- image: {{ kubernetes.kubevip.image }}
+ image: {{ .kubernetes.kubevip.image }}
imagePullPolicy: IfNotPresent
name: kube-vip
resources: {}
diff --git a/builtin/roles/install/nfs/tasks/debian.yaml b/builtin/roles/install/nfs/tasks/debian.yaml
index bbeba5d84..5ed766aaa 100644
--- a/builtin/roles/install/nfs/tasks/debian.yaml
+++ b/builtin/roles/install/nfs/tasks/debian.yaml
@@ -6,16 +6,16 @@
- name: Install nfs
command: apt update && apt install -y nfs-kernel-server
- when: nfs_server_install.stderr != ""
+ when: .nfs_server_install.stderr | ne ""
- name: Create nfs share directory
command: |
- if [ ! -d {{ item }} ]; then
- mkdir -p {{ item }}
- chmod -R 0755 {{ item }}
- chown nobody:nogroup {{ item }}
+ if [ ! -d {{ .item }} ]; then
+ mkdir -p {{ .item }}
+ chmod -R 0755 {{ .item }}
+ chown nobody:nogroup {{ .item }}
fi
- loop: "{{ nfs.share_dir }}"
+ loop: "{{ .nfs.share_dir | toJson }}"
- name: Generate nfs config
template:
diff --git a/builtin/roles/install/nfs/tasks/main.yaml b/builtin/roles/install/nfs/tasks/main.yaml
index bec3f2ae1..74cc6865f 100644
--- a/builtin/roles/install/nfs/tasks/main.yaml
+++ b/builtin/roles/install/nfs/tasks/main.yaml
@@ -1,6 +1,6 @@
---
- include_tasks: debian.yaml
- when: os.release.ID_LIKE == 'debian'
+ when: .os.release.ID_LIKE | eq "debian"
- include_tasks: rhel.yaml
- when: os.release.ID_LIKE == 'rhel fedora'
+ when: .os.release.ID_LIKE | eq "rhel fedora"
diff --git a/builtin/roles/install/nfs/tasks/rhel.yaml b/builtin/roles/install/nfs/tasks/rhel.yaml
index d770c566a..3912da456 100644
--- a/builtin/roles/install/nfs/tasks/rhel.yaml
+++ b/builtin/roles/install/nfs/tasks/rhel.yaml
@@ -6,16 +6,16 @@
- name: Install nfs
command: yum update && yum install -y nfs-utils
- when: nfs_server_install.stderr != ""
+ when: .nfs_server_install.stderr | ne ""
- name: Create nfs share directory
command: |
- if [ ! -d {{ item }} ]; then
- mkdir -p {{ item }}
- chmod -R 0755 {{ item }}
- chown nobody:nobody {{ item }}
+ if [ ! -d {{ .item }} ]; then
+ mkdir -p {{ .item }}
+ chmod -R 0755 {{ .item }}
+ chown nobody:nobody {{ .item }}
fi
- loop: "{{ nfs.share_dir }}"
+ loop: "{{ .nfs.share_dir | toJson }}"
- name: Generate nfs config
template:
diff --git a/builtin/roles/install/nfs/templates/exports b/builtin/roles/install/nfs/templates/exports
index ef2ecc040..01ae4cea1 100644
--- a/builtin/roles/install/nfs/templates/exports
+++ b/builtin/roles/install/nfs/templates/exports
@@ -1,3 +1,3 @@
-{% for p in nfs.share_dir %}
-{{ p }} *(rw,sync,no_subtree_check)
-{% endfor %}
+{{- range .nfs.share_dir }}
+{{ . }} *(rw,sync,no_subtree_check)
+{{- end }}
diff --git a/builtin/roles/install/security/tasks/main.yaml b/builtin/roles/install/security/tasks/main.yaml
index f4ffba87e..dfd993df5 100644
--- a/builtin/roles/install/security/tasks/main.yaml
+++ b/builtin/roles/install/security/tasks/main.yaml
@@ -5,7 +5,7 @@
chmod 600 /etc/ssl/etcd/ssl/* && chown root:root /etc/ssl/etcd/ssl/*
chmod 700 /var/lib/etcd && chown etcd:etcd /var/lib/etcd
chmod 550 /usr/local/bin/etcd* && chown root:root /usr/local/bin/etcd*
- when: inventory_name in groups['etcd']
+ when: .groups.etcd | default list | has .inventory_name
- name: security enhancement for control plane
command: |
@@ -21,7 +21,7 @@
chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml
chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service*
chmod 640 /etc/systemd/system/k8s-certs-renew* && chown root:root /etc/systemd/system/k8s-certs-renew*
- when: inventory_name in groups['kube_control_plane']
+ when: .groups.kube_control_plane | default list | has .inventory_name
- name: security enhancement for worker
command: |
@@ -36,4 +36,4 @@
chmod 550 -R /opt/cni/bin && chown root:root -R /opt/cni/bin
chmod 640 /var/lib/kubelet/config.yaml && chown root:root /var/lib/kubelet/config.yaml
chmod 640 -R /etc/systemd/system/kubelet.service* && chown root:root -R /etc/systemd/system/kubelet.service*
- when: inventory_name in groups['kube_worker']
+ when: .groups.kube_worker | default list | has .inventory_name
diff --git a/builtin/roles/precheck/artifact_check/tasks/main.yaml b/builtin/roles/precheck/artifact_check/tasks/main.yaml
index 800206c78..7101b5308 100644
--- a/builtin/roles/precheck/artifact_check/tasks/main.yaml
+++ b/builtin/roles/precheck/artifact_check/tasks/main.yaml
@@ -1,21 +1,21 @@
---
- name: Check artifact is exits
command:
- if [ ! -f "{{ artifact_file }}" ]; then
+ if [ ! -f "{{ .artifact.artifact_file }}" ]; then
exit 1
fi
- name: Check artifact file type
command:
- if [[ "{{ artifact_file }}" != *{{ item }} ]]; then
+ if [[ "{{ .artifact.artifact_file }}" != *{{ .item }} ]]; then
exit 1
fi
loop: ['.tgz','.tar.gz']
- name: Check md5 of artifact
command:
- if [[ $(md5sum {{ artifact_file }}) != {{ artifact.artifact_md5 }} ]]; then
+ if [[ "$(md5sum {{ .artifact.artifact_file }})" != "{{ .artifact.artifact_md5 }}" ]]; then
exit 1
fi
when:
- - artifact.artifact_md5 | defined
+ - and .artifact.artifact_md5 (ne .artifact.artifact_md5 "")
diff --git a/builtin/roles/precheck/env_check/defaults/main.yaml b/builtin/roles/precheck/env_check/defaults/main.yaml
index 9db419938..965e42d87 100644
--- a/builtin/roles/precheck/env_check/defaults/main.yaml
+++ b/builtin/roles/precheck/env_check/defaults/main.yaml
@@ -15,7 +15,6 @@ cluster_require:
minimal_node_memory_mb: 10
require_etcd_deployment_type: ['internal','external']
require_container_manager: ['docker', 'containerd']
- require_containerd_version: ['latest', 'edge', 'stable']
# the minimal required version of containerd to be installed.
containerd_min_version_required: v1.6.0
supported_architectures:
diff --git a/builtin/roles/precheck/env_check/tasks/cri.yaml b/builtin/roles/precheck/env_check/tasks/cri.yaml
index 3d1f0b944..de961da7d 100644
--- a/builtin/roles/precheck/env_check/tasks/cri.yaml
+++ b/builtin/roles/precheck/env_check/tasks/cri.yaml
@@ -1,16 +1,18 @@
---
- name: Stop if container manager is not docker or containerd
assert:
- that: cri.container_manager in cluster_require.require_container_manager
- fail_msg: "The container manager:{{ cri.container_manager }}, must be docker or containerd"
+ that: .cluster_require.require_container_manager | has .cri.container_manager
+ fail_msg: |
+ the container manager:{{ .cri.container_manager }}, must be "docker" or "containerd"
run_once: true
- when: cri.container_manager | defined
+ when: and .cri.container_manager (ne .cri.container_manager "")
- name: Ensure minimum containerd version
assert:
- that: containerd_version | version:'>={{cluster_require.containerd_min_version_required}}'
- fail_msg: "containerd_version is too low. Minimum version {{ cluster_require.containerd_min_version_required }}"
+ that: .containerd_version | semverCompare (printf ">=%s" .cluster_require.containerd_min_version_required)
+ fail_msg: |
+ containerd_version is too low. Minimum version {{ .cluster_require.containerd_min_version_required }}
run_once: true
when:
- - not containerd_version in cluster_require.require_containerd_version
- - cri.container_manager == 'containerd'
+ - and .containerd_version (ne .containerd_version "")
+ - .cri.container_manager | eq "containerd"
diff --git a/builtin/roles/precheck/env_check/tasks/etcd.yaml b/builtin/roles/precheck/env_check/tasks/etcd.yaml
index a0e51aae5..b4e4bee59 100644
--- a/builtin/roles/precheck/env_check/tasks/etcd.yaml
+++ b/builtin/roles/precheck/env_check/tasks/etcd.yaml
@@ -1,36 +1,36 @@
---
- name: Stop if etcd deployment type is not internal or external
assert:
- that: kubernetes.etcd.deployment_type in cluster_require.require_etcd_deployment_type
- fail_msg: "The etcd deployment type, 'kubernetes.etcd.deployment_type', must be internal or external"
+ that: .cluster_require.require_etcd_deployment_type | has .kubernetes.etcd.deployment_type
+ fail_msg: |
+ the etcd deployment type, should be internal or external but got {{ .kubernetes.etcd.deployment_type }}
run_once: true
- when: kubernetes.etcd.deployment_type | defined
+ when: and .kubernetes.etcd.deployment_type (ne .kubernetes.etcd.deployment_type "")
- name: Stop if etcd group is empty in internal etcd mode
assert:
- that: "'etcd' in groups"
- fail_msg: "Group 'etcd' cannot be empty in external etcd mode"
+ that: .groups.etcd
+ fail_msg: "group \"etcd\" cannot be empty in external etcd mode"
run_once: true
- when:
- - kubernetes.etcd.deployment_type == "external"
+ when: .kubernetes.etcd.deployment_type | eq "external"
- name: Stop if even number of etcd hosts
assert:
- that: not groups.etcd | length | divisibleby:2
- when:
- - inventory_name in groups['etcd']
+ that: (mod (.groups.etcd | len) 2) | eq 1
+ fail_msg: "etcd number should be odd number"
+ when: .groups.etcd
## https://cwiki.yunify.com/pages/viewpage.action?pageId=145920824
- name: Check dev io for etcd
when:
- - inventory_name in groups['etcd']
+ - .groups.etcd | default list | has .inventory_name
block:
- name: Check fio is exist
ignore_errors: true
command: fio --version
register: fio_install_version
- name: Test dev io by fio
- when: fio_install_version.stderr == ""
+ when: .fio_install_version.stderr | eq ""
block:
- name: Get fio result
command: |
@@ -39,8 +39,9 @@
register: fio_result
- name: Check fio result
assert:
- that: fio_result.stdout.jobs|first|get:'sync'|get:'lat_ns'|get:'percentile'|get:'90.000000' <= cluster_require.etcd_disk_wal_fysnc_duration_seconds
- fail_msg: "etcd_disk_wal_fysnc_duration_seconds: {{ fio_result.stdout.jobs|first|get:'sync'|get:'lat_ns'|get:'percentile'|get:'90.000000' }}ns is more than {{ cluster_require.etcd_disk_wal_fysnc_duration_seconds }}ns"
+ that: (index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000") | le .cluster_require.etcd_disk_wal_fysnc_duration_seconds
+ fail_msg: |
+ etcd_disk_wal_fysnc_duration_seconds: {{ index (.fio_result.stdout.jobs | first) "sync" "lat_ns" "percentile" "90.000000" }}ns is more than {{ .cluster_require.etcd_disk_wal_fysnc_duration_seconds }}ns
always:
- name: Clean test data dir
command: rm -rf /tmp/kubekey/etcd/test-data
diff --git a/builtin/roles/precheck/env_check/tasks/main.yaml b/builtin/roles/precheck/env_check/tasks/main.yaml
index d76e71060..2c8f595b3 100644
--- a/builtin/roles/precheck/env_check/tasks/main.yaml
+++ b/builtin/roles/precheck/env_check/tasks/main.yaml
@@ -1,10 +1,33 @@
---
+- name: Should defined internal_ipv4
+ assert:
+ that: and .internal_ipv4 (ne .internal_ipv4 "")
+ fail_msg: |
+ "internal_ipv4" should not be empty
+
+- name: Check kubevip
+ assert:
+ that:
+ - and .kubernetes.kube_vip.address (ne .kubernetes.kube_vip.address "")
+ - .kubernetes.kube_vip.address | regexMatch "^((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\\.(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])|(([0-9a-fA-F]{1,4}:){7}([0-9a-fA-F]{1,4}|:)|(([0-9a-fA-F]{1,4}:){1,6}|:):([0-9a-fA-F]{1,4}|:){1,6}([0-9a-fA-F]{1,4}|:)))$"
+ - |
+ {{- $existIP := false -}}
+ {{- range .groups.all | default list -}}
+ {{- if eq $.kubernetes.kube_vip.address (index $.inventory_hosts . "internal_ipv4") -}}
+ {{ $existIP = true }}
+ {{- end -}}
+ {{- end -}}
+ {{ not $existIP }}
+ fail_msg: |
+ "kubernetes.control_plane_endpoint" should be a un-used ip address when "kubernetes.kube_vip.enabled" is true
+ when: .kubernetes.kube_vip.enabled
+
- name: Stop if unsupported version of Kubernetes
assert:
- that: kube_version | version:'>={{ cluster_require.kube_version_min_required }}'
- fail_msg: "The current release of Kubespray only support newer version of Kubernetes than {{ kube_version_min_required }} - You are trying to apply {{ kube_version }}"
- when:
- - kube_version | defined
+ that: .kube_version | semverCompare (printf ">=%s" .cluster_require.kube_version_min_required)
+ fail_msg: |
+ the current release of Kubespray only support newer version of Kubernetes than {{ .kube_version_min_required }} - You are trying to apply {{ .kube_version }}
+ when: and .kube_version (ne .kube_version "")
- include_tasks: etcd.yaml
tags: ["etcd"]
diff --git a/builtin/roles/precheck/env_check/tasks/network.yaml b/builtin/roles/precheck/env_check/tasks/network.yaml
index 7f7a77ff3..892f71da2 100644
--- a/builtin/roles/precheck/env_check/tasks/network.yaml
+++ b/builtin/roles/precheck/env_check/tasks/network.yaml
@@ -1,10 +1,10 @@
---
- name: Stop if unknown network plugin
assert:
- that: kubernetes.kube_network_plugin in cluster_require.require_network_plugin
- fail_msg: "{{ kubernetes.kube_network_plugin }} is not supported"
- when:
- - kubernetes.kube_network_plugin | defined
+ that: .cluster_require.require_network_plugin | has .kubernetes.kube_network_plugin
+ fail_msg: |
+ kube_network_plugin:"{{ .kubernetes.kube_network_plugin }}" is not supported
+ when: and .kubernetes.kube_network_plugin (ne .kubernetes.kube_network_plugin "")
# This assertion will fail on the safe side: One can indeed schedule more pods
# on a node than the CIDR-range has space for when additional pods use the host
@@ -13,10 +13,10 @@
# NOTICE: the check blatantly ignores the inet6-case
- name: Guarantee that enough network address space is available for all pods
assert:
- that: "(kubernetes.kubelet.max_pods | integer) <= (2 | pow:{{ 32 - kubernetes.controller_manager.kube_network_node_prefix | integer }} - 2)"
- fail_msg: "Do not schedule more pods on a node than inet addresses are available."
+ that: le .kubernetes.kubelet.max_pods (sub (pow 2 (sub 32 .kubernetes.controller_manager.kube_network_node_prefix)) 2)
+ fail_msg: do not schedule more pods on a node than inet addresses are available.
when:
- - inventory_name in groups['k8s_cluster']
- - kubernetes.controller_manager.kube_network_node_prefix | defined
- - kubernetes.kube_network_plugin != 'calico'
+ - .groups.k8s_cluster | default list | has .inventory_name
+ - .kubernetes.controller_manager.kube_network_node_prefix
+ - .kubernetes.kube_network_plugin | ne "calico"
diff --git a/builtin/roles/precheck/env_check/tasks/nfs.yaml b/builtin/roles/precheck/env_check/tasks/nfs.yaml
index c67438d2c..8271da20a 100644
--- a/builtin/roles/precheck/env_check/tasks/nfs.yaml
+++ b/builtin/roles/precheck/env_check/tasks/nfs.yaml
@@ -1,6 +1,6 @@
---
- name: Stop if nfs server is not be one
assert:
- that: groups['nfs'] | length == 1
- fail_msg: "Only one nfs server is supported"
- when: groups['nfs'] | length > 0
+ that: .groups.nfs | default list | len | eq 1
+ fail_msg: "only one nfs server is supported"
+ when: .groups.nfs
diff --git a/builtin/roles/precheck/env_check/tasks/os.yaml b/builtin/roles/precheck/env_check/tasks/os.yaml
index 7ae968692..a434d6b81 100644
--- a/builtin/roles/precheck/env_check/tasks/os.yaml
+++ b/builtin/roles/precheck/env_check/tasks/os.yaml
@@ -1,35 +1,37 @@
---
- name: Stop if bad hostname
- vars:
- regex: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$'
assert:
- that: inventory_name | match:regex
+ that: .inventory_name | regexMatch "^[a-z0-9]([a-z0-9-]*[a-z0-9])?(\\.[a-z0-9]([a-z0-9-]*[a-z0-9])?)*$"
fail_msg: "Hostname must consist of lower case alphanumeric characters, '.' or '-', and must start and end with an alphanumeric character"
- name: Stop if the os does not support
assert:
- that: (cluster_require.allow_unsupported_distribution_setup) or (os.release.ID in cluster_require.supported_os_distributions)
- fail_msg: "{{ os.release.ID }} is not a known OS"
+ that: or (.cluster_require.allow_unsupported_distribution_setup) (.cluster_require.supported_os_distributions | has .os.release.ID)
+ fail_msg: "{{ .os.release.ID }} is not a known OS"
- name: Stop if arch supported
assert:
- that: os.architecture in cluster_require.supported_architectures.amd64 or os.architecture in cluster_require.supported_architectures.arm64
- success_msg: "{% if (os.architecture in cluster_require.supported_architectures.amd64) %}amd64{% else %}arm64{% endif %}"
- fail_msg: "{{ os.architecture }} is not a known OS"
+ that: or (.cluster_require.supported_architectures.amd64 | has .os.architecture) (.cluster_require.supported_architectures.arm64 | has .os.architecture)
+ success_msg: |
+ {{- if .cluster_require.supported_architectures.amd64 | has .os.architecture -}}
+ amd64
+ {{- else -}}
+ arm64
+ {{- end -}}
+ fail_msg: "{{ .os.architecture }} is not a known arch"
register: binary_type
- name: Stop if memory is too small for masters
assert:
- that: process.memInfo.MemTotal | cut:' kB' >= cluster_require.minimal_master_memory_mb
- when:
- - inventory_name in groups['kube_control_plane']
+ that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_master_memory_mb
+ when: .groups.kube_control_plane | default list | has .inventory_name
- name: Stop if memory is too small for nodes
assert:
- that: process.memInfo.MemTotal | cut:' kB' >= cluster_require.minimal_node_memory_mb
+ that: .process.memInfo.MemTotal | trimSuffix " kB" | atoi | le .cluster_require.minimal_node_memory_mb
when:
- - inventory_name in groups['kube_worker']
+ - .groups.kube_worker | default list | has .inventory_name
- name: Stop if kernel version is too low
assert:
- that: os.kernel_version | split:'-' | first | version:'>=4.9.17'
+ that: .os.kernel_version | splitList "-" | first | semverCompare ">=4.9.17"
diff --git a/docs/zh/101-syntax.md b/docs/zh/101-syntax.md
index 9568781c6..536004aca 100644
--- a/docs/zh/101-syntax.md
+++ b/docs/zh/101-syntax.md
@@ -1,62 +1,19 @@
# 语法
-语法遵循Django-syntax规范.采用[pongo2](https://github.com/flosch/pongo2)实现, 并pongo2的关键字进行了扩展
-# 自定义关键字
-## defined
-判断某个参数是否在[variable](201-variable.md)中定义. 值为bool类型
-```yaml
-{{ variable | defined }}
-```
-## version
-比较版本大小. 参数为比较标准, 值为bool类型
-```yaml
-# version_variable>v1.0.0
-{{ version_variable | version:'>v1.0.0' }}
-# version_variable>=v1.0.0
-{{ version_variable | version:'>=v1.0.0' }}
-# version_variable==v1.0.0
-{{ version_variable | version:'==v1.0.0' }}
-# version_variable<=v1.0.0
-{{ version_variable | version:'<=v1.0.0' }}
-# version_variable/dev/null
-}
-echoerr() {
- echo "$@" 1>&2
-}
-log_prefix() {
- echo "$0"
-}
-_logp=6
-log_set_priority() {
- _logp="$1"
-}
-log_priority() {
- if test -z "$1"; then
- echo "$_logp"
- return
- fi
- [ "$1" -le "$_logp" ]
-}
-log_tag() {
- case $1 in
- 0) echo "emerg" ;;
- 1) echo "alert" ;;
- 2) echo "crit" ;;
- 3) echo "err" ;;
- 4) echo "warning" ;;
- 5) echo "notice" ;;
- 6) echo "info" ;;
- 7) echo "debug" ;;
- *) echo "$1" ;;
- esac
-}
-log_debug() {
- log_priority 7 || return 0
- echoerr "$(log_prefix)" "$(log_tag 7)" "$@"
-}
-log_info() {
- log_priority 6 || return 0
- echoerr "$(log_prefix)" "$(log_tag 6)" "$@"
-}
-log_err() {
- log_priority 3 || return 0
- echoerr "$(log_prefix)" "$(log_tag 3)" "$@"
-}
-log_crit() {
- log_priority 2 || return 0
- echoerr "$(log_prefix)" "$(log_tag 2)" "$@"
-}
-uname_os() {
- os=$(uname -s | tr '[:upper:]' '[:lower:]')
- case "$os" in
- cygwin_nt*) os="windows" ;;
- mingw*) os="windows" ;;
- msys_nt*) os="windows" ;;
- esac
- echo "$os"
-}
-uname_arch() {
- arch=$(uname -m)
- case $arch in
- x86_64) arch="amd64" ;;
- x86) arch="386" ;;
- i686) arch="386" ;;
- i386) arch="386" ;;
- aarch64) arch="arm64" ;;
- armv5*) arch="armv5" ;;
- armv6*) arch="armv6" ;;
- armv7*) arch="armv7" ;;
- esac
- echo ${arch}
-}
-uname_os_check() {
- os=$(uname_os)
- case "$os" in
- darwin) return 0 ;;
- dragonfly) return 0 ;;
- freebsd) return 0 ;;
- linux) return 0 ;;
- android) return 0 ;;
- nacl) return 0 ;;
- netbsd) return 0 ;;
- openbsd) return 0 ;;
- plan9) return 0 ;;
- solaris) return 0 ;;
- windows) return 0 ;;
- esac
- log_crit "uname_os_check '$(uname -s)' got converted to '$os' which is not a GOOS value. Please file bug at https://github.com/client9/shlib"
- return 1
-}
-uname_arch_check() {
- arch=$(uname_arch)
- case "$arch" in
- 386) return 0 ;;
- amd64) return 0 ;;
- arm64) return 0 ;;
- armv5) return 0 ;;
- armv6) return 0 ;;
- armv7) return 0 ;;
- ppc64) return 0 ;;
- ppc64le) return 0 ;;
- mips) return 0 ;;
- mipsle) return 0 ;;
- mips64) return 0 ;;
- mips64le) return 0 ;;
- s390x) return 0 ;;
- amd64p32) return 0 ;;
- esac
- log_crit "uname_arch_check '$(uname -m)' got converted to '$arch' which is not a GOARCH value. Please file bug report at https://github.com/client9/shlib"
- return 1
-}
-untar() {
- tarball=$1
- case "${tarball}" in
- *.tar.gz | *.tgz) tar --no-same-owner -xzf "${tarball}" ;;
- *.tar) tar --no-same-owner -xf "${tarball}" ;;
- *.zip) unzip "${tarball}" ;;
- *)
- log_err "untar unknown archive format for ${tarball}"
- return 1
- ;;
- esac
-}
-http_download_curl() {
- local_file=$1
- source_url=$2
- header=$3
- if [ -z "$header" ]; then
- code=$(curl -w '%{http_code}' -sL -o "$local_file" "$source_url")
- else
- code=$(curl -w '%{http_code}' -sL -H "$header" -o "$local_file" "$source_url")
- fi
- if [ "$code" != "200" ]; then
- log_debug "http_download_curl received HTTP status $code"
- return 1
- fi
- return 0
-}
-http_download_wget() {
- local_file=$1
- source_url=$2
- header=$3
- if [ -z "$header" ]; then
- wget -q -O "$local_file" "$source_url"
- else
- wget -q --header "$header" -O "$local_file" "$source_url"
- fi
-}
-http_download() {
- log_debug "http_download $2"
- if is_command curl; then
- http_download_curl "$@"
- return
- elif is_command wget; then
- http_download_wget "$@"
- return
- fi
- log_crit "http_download unable to find wget or curl"
- return 1
-}
-http_copy() {
- tmp=$(mktemp)
- http_download "${tmp}" "$1" "$2" || return 1
- body=$(cat "$tmp")
- rm -f "${tmp}"
- echo "$body"
-}
-github_release() {
- owner_repo=$1
- version=$2
- test -z "$version" && version="latest"
- giturl="https://github.com/${owner_repo}/releases/${version}"
- json=$(http_copy "$giturl" "Accept:application/json")
- test -z "$json" && return 1
- version=$(echo "$json" | tr -s '\n' ' ' | sed 's/.*"tag_name":"//' | sed 's/".*//')
- test -z "$version" && return 1
- echo "$version"
-}
-hash_sha256() {
- TARGET=${1:-/dev/stdin}
- if is_command gsha256sum; then
- hash=$(gsha256sum "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command sha256sum; then
- hash=$(sha256sum "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command shasum; then
- hash=$(shasum -a 256 "$TARGET" 2>/dev/null) || return 1
- echo "$hash" | cut -d ' ' -f 1
- elif is_command openssl; then
- hash=$(openssl -dst openssl dgst -sha256 "$TARGET") || return 1
- echo "$hash" | cut -d ' ' -f a
- else
- log_crit "hash_sha256 unable to find command to compute sha-256 hash"
- return 1
- fi
-}
-hash_sha256_verify() {
- TARGET=$1
- checksums=$2
- if [ -z "$checksums" ]; then
- log_err "hash_sha256_verify checksum file not specified in arg2"
- return 1
- fi
- BASENAME=${TARGET##*/}
- want=$(grep "${BASENAME}" "${checksums}" 2>/dev/null | tr '\t' ' ' | cut -d ' ' -f 1)
- if [ -z "$want" ]; then
- log_err "hash_sha256_verify unable to find checksum for '${TARGET}' in '${checksums}'"
- return 1
- fi
- got=$(hash_sha256 "$TARGET")
- if [ "$want" != "$got" ]; then
- log_err "hash_sha256_verify checksum for '$TARGET' did not verify ${want} vs $got"
- return 1
- fi
-}
-cat /dev/null <="):
- compareVersion := strings.TrimSpace(paramString[2:])
- ci, err := inVersion.Compare(compareVersion)
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter second param error: %w", err),
- }
- }
- return pongo2.AsValue(ci >= 0), nil
- case strings.HasPrefix(paramString, "<="):
- compareVersion := strings.TrimSpace(paramString[2:])
- ci, err := inVersion.Compare(compareVersion)
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter second param error: %w", err),
- }
- }
- return pongo2.AsValue(ci <= 0), nil
- case strings.HasPrefix(paramString, "=="):
- compareVersion := strings.TrimSpace(paramString[2:])
- ci, err := inVersion.Compare(compareVersion)
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter second param error: %w", err),
- }
- }
- return pongo2.AsValue(ci == 0), nil
- case strings.HasPrefix(paramString, ">"):
- compareVersion := strings.TrimSpace(paramString[1:])
- ci, err := inVersion.Compare(compareVersion)
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter second param error: %w", err),
- }
- }
- return pongo2.AsValue(ci == 1), nil
- case strings.HasPrefix(paramString, "<"):
- compareVersion := strings.TrimSpace(paramString[1:])
- ci, err := inVersion.Compare(compareVersion)
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter second param error: %w", err),
- }
- }
- return pongo2.AsValue(ci == -1), nil
- default:
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "filter:version",
- OrigError: fmt.Errorf("converter first param error: %w", err),
- }
- }
-}
-
-func filterPow(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
- return pongo2.AsValue(math.Pow(in.Float(), param.Float())), nil
-}
-
-func filterMatch(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
- match, err := regexp.Match(param.String(), []byte(in.String()))
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{Sender: "filter:match", OrigError: err}
- }
- return pongo2.AsValue(match), nil
-}
-
-func filterToJson(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
- data, err := json.Marshal(in.Interface())
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "to_json",
- OrigError: fmt.Errorf("parse in to json: %w", err),
- }
- }
- result := string(data)
- if param.IsInteger() {
- result = Indent(param.Integer(), result)
- }
- return pongo2.AsValue(result), nil
-}
-
-func filterToYaml(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
- if in.IsNil() {
- return pongo2.AsValue(nil), nil
- }
- data, err := yaml.Marshal(in.Interface())
- if err != nil {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "to_yaml",
- OrigError: fmt.Errorf("parse in to json: %w", err),
- }
- }
- result := string(data)
- if result == "{}\n" || result == "{}" {
- return pongo2.AsValue(nil), nil
- }
- if !param.IsNil() && param.IsInteger() {
- result = Indent(param.Integer(), result)
- }
- return pongo2.AsValue(result), nil
-}
-
-func filterIpRange(in *pongo2.Value, param *pongo2.Value) (*pongo2.Value, *pongo2.Error) {
- if in.IsNil() || !in.IsString() {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "ip_range",
- OrigError: fmt.Errorf("input is not format string"),
- }
- }
- var ipRange = make([]string, 0)
- for _, s := range strings.Split(in.String(), ",") {
- ipRange = append(ipRange, ParseIp(s)...)
- }
- // if param is integer. return a single value
- if param.IsInteger() {
- index := param.Integer()
- // handle negative number
- if index < 0 {
- index = max(len(ipRange)+index, 0)
- }
- index = max(index, 0)
- index = min(index, len(ipRange)-1)
- return pongo2.AsValue(ipRange[index]), nil
- }
- if param.IsString() {
- comp := strings.Split(param.String(), ":")
- switch len(comp) {
- case 1: // return a single value
- index := pongo2.AsValue(comp[0]).Integer()
- // handle negative number
- if index < 0 {
- index = max(len(ipRange)+index, 0)
- }
- index = max(index, 0)
- index = min(index, len(ipRange)-1)
- return pongo2.AsValue(ipRange[index]), nil
- case 2: // return a slice
- // start with [x:len]
- from := pongo2.AsValue(comp[0]).Integer()
- from = max(from, 0)
- from = min(from, len(ipRange)-1)
-
- to := pongo2.AsValue(comp[1]).Integer()
- // handle missing y
- if strings.TrimSpace(comp[1]) == "" {
- to = len(ipRange) - 1
- }
- to = max(to, from)
- to = min(to, len(ipRange)-1)
-
- return pongo2.AsValue(ipRange[from:to]), nil
- default:
- return nil, &pongo2.Error{
- Sender: "filter:ip_range",
- OrigError: fmt.Errorf("ip_range string must have the format 'from:to' or a single number format 'index'"),
- }
- }
- }
-
- return pongo2.AsValue(ipRange), nil
-}
-
-// filterGet get value from map or array
-func filterGet(in *pongo2.Value, param *pongo2.Value) (out *pongo2.Value, err *pongo2.Error) {
- var result *pongo2.Value
- in.Iterate(func(idx, count int, key, value *pongo2.Value) bool {
- if param.IsInteger() && idx == param.Integer() {
- result = in.Index(idx)
- return false
- }
- if param.IsString() && key.String() == param.String() {
- result = pongo2.AsValue(value.Interface())
- return false
- }
- return true
- }, func() {
- result = pongo2.AsValue(nil)
- })
- return result, nil
-}
-
-func filterRand(in *pongo2.Value, param *pongo2.Value) (out *pongo2.Value, err *pongo2.Error) {
- if !param.IsInteger() {
- return pongo2.AsValue(nil), &pongo2.Error{
- Sender: "rand",
- OrigError: fmt.Errorf("param is not format int"),
- }
- }
- return pongo2.AsValue(rand.String(param.Integer())), nil
-}
diff --git a/pkg/converter/tmpl/filter_extension_test.go b/pkg/converter/tmpl/filter_extension_test.go
deleted file mode 100644
index a81714ccc..000000000
--- a/pkg/converter/tmpl/filter_extension_test.go
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
-Copyright 2023 The KubeSphere Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package tmpl
-
-import (
- "encoding/json"
- "testing"
-
- "github.com/flosch/pongo2/v6"
- "github.com/stretchr/testify/assert"
-)
-
-func TestFilter(t *testing.T) {
- testcases := []struct {
- name string
- input string
- ctx pongo2.Context
- except string
- }{
- {
- name: "default",
- input: "{{ os.release.Name | default_if_none:false }}",
- ctx: map[string]any{
- "os": map[string]any{
- "release": map[string]any{
- "ID": "a",
- },
- },
- },
- except: "False",
- },
- {
- name: "default_if_none",
- input: "{{ os.release.Name | default_if_none:'b' }}",
- ctx: map[string]any{
- "os": map[string]any{
- "release": map[string]any{
- "ID": "a",
- },
- },
- },
- except: "b",
- },
- {
- name: "defined",
- input: "{{ test | defined }}",
- ctx: map[string]any{
- "test": "aaa",
- },
- except: "True",
- },
- {
- name: "version_greater",
- input: "{{ test | version:'>=v1.19.0' }}",
- ctx: map[string]any{
- "test": "v1.23.10",
- },
- except: "True",
- },
- {
- name: "divisibleby",
- input: "{{ not test['a'] | length | divisibleby:2 }}",
- ctx: map[string]any{
- "test": map[string]any{
- "a": "1",
- },
- },
- except: "True",
- },
- {
- name: "power",
- input: "{{ (test | integer) >= (2 | pow: test2 | integer ) }}",
- ctx: map[string]any{
- "test": "12",
- "test2": "3s",
- },
- except: "True",
- },
- {
- name: "split",
- input: "{{ kernel_version | split:'-' | first }}",
- ctx: map[string]any{
- "kernel_version": "5.15.0-89-generic",
- },
- except: "5.15.0",
- },
- {
- name: "match",
- input: "{{ test | match:regex }}",
- ctx: map[string]any{
- "test": "abc",
- "regex": "[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$",
- },
- except: "True",
- },
- {
- name: "to_json",
- input: "{{ test|to_json|safe }}",
- ctx: map[string]any{
- "test": []string{"a", "b"},
- },
- except: "[\"a\",\"b\"]",
- },
- {
- name: "to_yaml",
- input: "{{ test | to_yaml:4 }}",
- ctx: map[string]any{
- "test": map[string]string{
- "a": "b/c/d:123",
- },
- },
- except: " a: b/c/d:123\n ",
- },
- {
- name: "bool",
- input: "{% if test %}a{% else %}b{% endif %}",
- ctx: map[string]any{
- "test": true,
- },
- except: "a",
- },
- {
- name: "number",
- input: "a = {{ test }}",
- ctx: map[string]any{
- "test": "23",
- },
- except: "a = 23",
- },
- {
- name: "get from map",
- input: "{{ test|get:'a1' }}",
- ctx: map[string]any{
- "test": map[string]any{
- "a1": 10,
- "a2": "b2",
- },
- },
- except: "10",
- },
- {
- name: "get index from ip_range",
- input: "{{ test|ip_range:0 }}",
- ctx: map[string]any{
- "test": "10.233.0.0/18",
- },
- except: "10.233.0.1",
- },
- {
- name: "get index string from ip_range",
- input: "{{ test|ip_range:'1' }}",
- ctx: map[string]any{
- "test": "10.233.0.0/18",
- },
- except: "10.233.0.2",
- },
- {
- name: "get negative number from ip_range",
- input: "{{ test|ip_range:'-1' }}",
- ctx: map[string]any{
- "test": "10.233.0.0/18",
- },
- except: "10.233.63.254",
- },
- {
- name: "get range from ip_range",
- input: "{{ test|ip_range:':1'|last }}",
- ctx: map[string]any{
- "test": "10.233.0.0/18",
- },
- except: "10.233.0.1",
- },
- }
-
- for _, tc := range testcases {
- t.Run("filter: "+tc.name, func(t *testing.T) {
- tql, err := pongo2.FromString(tc.input)
- if err != nil {
- t.Fatal(err)
- }
- result, err := tql.Execute(tc.ctx)
- if err != nil {
- t.Fatal(err)
- }
- var v []string
- if err := json.Unmarshal([]byte("[\""+result+"\"]"), &v); err != nil {
- assert.Equal(t, tc.except, result)
- } else {
- assert.Equal(t, tc.except, v[0])
- }
- assert.Equal(t, tc.except, result)
- })
- }
-}
diff --git a/pkg/converter/tmpl/template.go b/pkg/converter/tmpl/template.go
index 8da5fb402..d57343a13 100644
--- a/pkg/converter/tmpl/template.go
+++ b/pkg/converter/tmpl/template.go
@@ -17,80 +17,55 @@ limitations under the License.
package tmpl
import (
+ "bytes"
"fmt"
"strings"
- "github.com/flosch/pongo2/v6"
"k8s.io/klog/v2"
+
+ "github.com/kubesphere/kubekey/v4/pkg/converter/internal"
)
-// ParseBool by pongo2 with not contain "{{ }}". It will add "{{ }}" to input string.
-func ParseBool(ctx pongo2.Context, inputs []string) (bool, error) {
+// ParseBool parse template string to bool
+func ParseBool(ctx map[string]any, inputs []string) (bool, error) {
for _, input := range inputs {
- // first convert: parse variable like "{{ }}" in input
- intql, err := pongo2.FromString(input)
- if err != nil {
- klog.V(4).ErrorS(err, "Failed to get string")
- return false, err
- }
- inres, err := intql.Execute(ctx)
- if err != nil {
- klog.V(4).ErrorS(err, "Failed to execute string")
- return false, err
+ if !IsTmplSyntax(input) {
+ input = "{{ " + input + " }}"
}
-
- // second convert: add {{ }} to input.
- // trim line break.
- inres = strings.TrimSuffix(inres, "\n")
- inres = fmt.Sprintf("{{ %s }}", inres)
- tql, err := pongo2.FromString(inres)
+ tl, err := internal.Template.Parse(input)
if err != nil {
- klog.V(4).ErrorS(err, "failed to get string")
- return false, err
+ return false, fmt.Errorf("failed to parse template '%s': %v", input, err)
}
- result, err := tql.Execute(ctx)
- if err != nil {
- klog.V(4).ErrorS(err, "failed to execute string")
- return false, err
+ result := bytes.NewBuffer(nil)
+ if err := tl.Execute(result, ctx); err != nil {
+ return false, fmt.Errorf("failed to execute template '%s': %v", input, err)
}
- klog.V(6).InfoS(" parse template succeed", "result", result)
- if result != "True" {
+ klog.V(6).InfoS(" parse template succeed", "result", result.String())
+ if result.String() != "true" {
return false, nil
}
}
return true, nil
}
-// ParseString with contain "{{ }}"
-func ParseString(ctx pongo2.Context, input string) (string, error) {
- if len(ctx) == 0 || !IsTmplSyntax(input) {
+// ParseString parse template string to actual string
+func ParseString(ctx map[string]any, input string) (string, error) {
+ if !IsTmplSyntax(input) {
return input, nil
}
- tql, err := pongo2.FromString(input)
+ tl, err := internal.Template.Parse(input)
if err != nil {
- klog.V(4).ErrorS(err, "Failed to get string")
- return input, err
+ return "", fmt.Errorf("failed to parse template '%s': %v", input, err)
}
- result, err := tql.Execute(ctx)
- if err != nil {
- klog.V(4).ErrorS(err, "Failed to execute string")
- return input, err
+ result := bytes.NewBuffer(nil)
+ if err := tl.Execute(result, ctx); err != nil {
+ return "", fmt.Errorf("failed to execute template '%s': %v", input, err)
}
- klog.V(6).InfoS(" parse template succeed", "result", result)
- return result, nil
+ klog.V(6).InfoS(" parse template succeed", "result", result.String())
+ return strings.TrimPrefix(strings.TrimSuffix(result.String(), "\n"), "\n"), nil
}
-func ParseFile(ctx pongo2.Context, file []byte) (string, error) {
- tql, err := pongo2.FromBytes(file)
- if err != nil {
- klog.V(4).ErrorS(err, "Transfer file to template error")
- return "", err
- }
- result, err := tql.Execute(ctx)
- if err != nil {
- klog.V(4).ErrorS(err, "exec template error")
- return "", err
- }
- klog.V(6).InfoS(" parse template succeed", "result", result)
- return result, nil
+// IsTmplSyntax Check if the string conforms to the template syntax.
+func IsTmplSyntax(s string) bool {
+ return strings.Contains(s, "{{") && strings.Contains(s, "}}")
}
diff --git a/pkg/converter/tmpl/template_test.go b/pkg/converter/tmpl/template_test.go
index 3af89c595..39e661fc7 100644
--- a/pkg/converter/tmpl/template_test.go
+++ b/pkg/converter/tmpl/template_test.go
@@ -19,7 +19,6 @@ package tmpl
import (
"testing"
- "github.com/flosch/pongo2/v6"
"github.com/stretchr/testify/assert"
)
@@ -27,32 +26,155 @@ func TestParseBool(t *testing.T) {
testcases := []struct {
name string
condition []string
- variable pongo2.Context
+ variable map[string]any
excepted bool
}{
+ // ======= semverCompare =======
{
- name: "parse success",
- condition: []string{"foo == \"bar\""},
- variable: pongo2.Context{
+ name: "semverCompare true-1",
+ condition: []string{"{{ .foo | semverCompare \">=v1.21\" }}"},
+ variable: map[string]any{
+ "foo": "v1.23",
+ },
+ excepted: true,
+ },
+ {
+ name: "semverCompare true-2",
+ condition: []string{"{{ .foo | semverCompare \"v1.21\" }}"},
+ variable: map[string]any{
+ "foo": "v1.21",
+ },
+ excepted: true,
+ },
+ {
+ name: "semverCompare true-3",
+ condition: []string{"{{ semverCompare \">=v1.21\" .foo }}"},
+ variable: map[string]any{
+ "foo": "v1.23",
+ },
+ excepted: true,
+ },
+ {
+ name: "semverCompare true-3",
+ condition: []string{"{{ semverCompare \" 0 {
ok, err := tmpl.ParseBool(ha.(map[string]any), task.Spec.When)
@@ -486,18 +515,15 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
return
}
}
-
+ // if loop is empty, execute once with a null item
for _, item := range loop {
// set item to runtime variable
if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
- "item": item,
+ _const.VariableItem: item,
})); err != nil {
stderr = fmt.Sprintf("set loop item to variable error: %v", err)
return
}
- if err := bar.Add(1); err != nil {
- klog.ErrorS(err, "fail to add bar")
- }
stdout, stderr = e.executeModule(ctx, task, modules.ExecOptions{
Args: task.Spec.Module.Args,
Host: h,
@@ -505,24 +531,18 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
Task: *task,
Pipeline: *e.pipeline,
})
- if err := bar.Add(1); err != nil {
- klog.ErrorS(err, "fail to add bar")
- }
// delete item
if err := e.variable.Merge(variable.MergeRuntimeVariable(h, map[string]any{
- "item": nil,
+ _const.VariableItem: nil,
})); err != nil {
stderr = fmt.Sprintf("clean loop item to variable error: %v", err)
return
}
- if err := bar.Add(1); err != nil {
- klog.ErrorS(err, "fail to add bar")
- }
}
})
}
wg.Wait()
-
+ // host result for task
task.Status.Phase = kubekeyv1alpha1.TaskPhaseSuccess
for _, data := range task.Status.HostResults {
if data.StdErr != "" {
@@ -538,7 +558,11 @@ func (e executor) executeTask(ctx context.Context, task *kubekeyv1alpha1.Task, o
return nil
}
-func (e executor) execLoop(ctx context.Context, ha map[string]any, task *kubekeyv1alpha1.Task) ([]any, error) {
+// parseLoop parse loop to slice. if loop contains template string. convert it.
+// loop is json string. try convertor to string slice by json.
+// loop is normal string. set it to empty slice and return.
+// loop is string slice. return it.
+func (e executor) parseLoop(ctx context.Context, ha map[string]any, task *kubekeyv1alpha1.Task) ([]any, error) {
switch {
case task.Spec.Loop.Raw == nil:
// loop is not set. add one element to execute once module.
@@ -548,13 +572,14 @@ func (e executor) execLoop(ctx context.Context, ha map[string]any, task *kubekey
}
}
+// executeModule find register module and execute it.
func (e executor) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task, opts modules.ExecOptions) (string, string) {
+ // get all variable. which contains item.
lg, err := opts.Variable.Get(variable.GetAllVariable(opts.Host))
if err != nil {
klog.V(5).ErrorS(err, "get location variable error", "task", ctrlclient.ObjectKeyFromObject(task))
return "", err.Error()
}
-
// check failed when condition
if len(task.Spec.FailedWhen) > 0 {
ok, err := tmpl.ParseBool(lg.(map[string]any), task.Spec.FailedWhen)
@@ -570,7 +595,7 @@ func (e executor) executeModule(ctx context.Context, task *kubekeyv1alpha1.Task,
return modules.FindModule(task.Spec.Module.Name)(ctx, opts)
}
-// merge defined variable to host variable
+// mergeVariable to runtime variable
func (e executor) mergeVariable(ctx context.Context, v variable.Variable, vd map[string]any, hosts ...string) error {
if len(vd) == 0 {
// skip
diff --git a/pkg/manager/command_manager.go b/pkg/manager/command_manager.go
index 53f4af135..cfee50bdd 100644
--- a/pkg/manager/command_manager.go
+++ b/pkg/manager/command_manager.go
@@ -18,7 +18,10 @@ package manager
import (
"context"
+ "fmt"
+ "io"
"os"
+ "time"
"k8s.io/klog/v2"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -34,22 +37,23 @@ type commandManager struct {
*kubekeyv1.Inventory
ctrlclient.Client
+
+ logOutput io.Writer
}
func (m *commandManager) Run(ctx context.Context) error {
- klog.Infof("[Pipeline %s] start", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] start\n", time.Now().Format(time.RFC822), ctrlclient.ObjectKeyFromObject(m.Pipeline))
cp := m.Pipeline.DeepCopy()
defer func() {
- klog.Infof("[Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v", ctrlclient.ObjectKeyFromObject(m.Pipeline),
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] finish. total: %v,success: %v,ignored: %v,failed: %v\n", time.Now().Format(time.RFC3339), ctrlclient.ObjectKeyFromObject(m.Pipeline),
m.Pipeline.Status.TaskResult.Total, m.Pipeline.Status.TaskResult.Success, m.Pipeline.Status.TaskResult.Ignored, m.Pipeline.Status.TaskResult.Failed)
if !m.Pipeline.Spec.Debug && m.Pipeline.Status.Phase == kubekeyv1.PipelinePhaseSucceed {
- klog.Infof("[Pipeline %s] clean runtime directory", ctrlclient.ObjectKeyFromObject(m.Pipeline))
+ fmt.Fprintf(m.logOutput, "%s [Pipeline %s] clean runtime directory\n", time.Now().Format(time.RFC822), ctrlclient.ObjectKeyFromObject(m.Pipeline))
// clean runtime directory
if err := os.RemoveAll(_const.GetRuntimeDir()); err != nil {
klog.ErrorS(err, "clean runtime directory error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline), "runtime_dir", _const.GetRuntimeDir())
}
}
-
if m.Pipeline.Spec.JobSpec.Schedule != "" { // if pipeline is cornJob. it's always running.
m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseRunning
}
@@ -59,9 +63,8 @@ func (m *commandManager) Run(ctx context.Context) error {
}
}()
- klog.Infof("[Pipeline %s] start task controller", ctrlclient.ObjectKeyFromObject(m.Pipeline))
m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseSucceed
- if err := executor.NewTaskExecutor(m.Client, m.Pipeline).Exec(ctx); err != nil {
+ if err := executor.NewTaskExecutor(m.Client, m.Pipeline, m.logOutput).Exec(ctx); err != nil {
klog.ErrorS(err, "executor tasks error", "pipeline", ctrlclient.ObjectKeyFromObject(m.Pipeline))
m.Pipeline.Status.Phase = kubekeyv1.PipelinePhaseFailed
m.Pipeline.Status.Reason = err.Error()
diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go
index 36bc526ed..6dc66aeb3 100644
--- a/pkg/manager/manager.go
+++ b/pkg/manager/manager.go
@@ -18,6 +18,7 @@ package manager
import (
"context"
+ "os"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -44,6 +45,7 @@ func NewCommandManager(o CommandManagerOptions) Manager {
Config: o.Config,
Inventory: o.Inventory,
Client: o.Client,
+ logOutput: os.Stdout,
}
}
diff --git a/pkg/modules/assert_test.go b/pkg/modules/assert_test.go
index 109b3ca5b..51c579564 100644
--- a/pkg/modules/assert_test.go
+++ b/pkg/modules/assert_test.go
@@ -46,7 +46,7 @@ func TestAssert(t *testing.T) {
opt: ExecOptions{
Host: "local",
Args: runtime.RawExtension{
- Raw: []byte(`{"that": ["true", "testvalue==\"a\""]}`),
+ Raw: []byte(`{"that": ["true", "eq .testvalue \"a\""]}`),
},
Variable: &testVariable{
value: map[string]any{
@@ -61,7 +61,7 @@ func TestAssert(t *testing.T) {
opt: ExecOptions{
Host: "local",
Args: runtime.RawExtension{
- Raw: []byte(`{"that": ["true", "k1==\"v1\""], "success_msg": "success {{k2}}"}`),
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v1\""], "success_msg": "success {{ .k2 }}"}`),
},
Variable: &testVariable{
value: map[string]any{
@@ -77,7 +77,7 @@ func TestAssert(t *testing.T) {
opt: ExecOptions{
Host: "local",
Args: runtime.RawExtension{
- Raw: []byte(`{"that": ["true", "k1==\"v2\""]}`),
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v2\""]}`),
},
Variable: &testVariable{
value: map[string]any{
@@ -94,7 +94,7 @@ func TestAssert(t *testing.T) {
opt: ExecOptions{
Host: "local",
Args: runtime.RawExtension{
- Raw: []byte(`{"that": ["true", "k1==\"v2\""], "fail_msg": "failed {{k2}}"}`),
+ Raw: []byte(`{"that": ["true", "eq .k1 \"v2\""], "fail_msg": "failed {{ .k2 }}"}`),
},
Variable: &testVariable{
value: map[string]any{
diff --git a/pkg/modules/debug_test.go b/pkg/modules/debug_test.go
index a9e0d25af..294a85478 100644
--- a/pkg/modules/debug_test.go
+++ b/pkg/modules/debug_test.go
@@ -45,7 +45,7 @@ func TestDebug(t *testing.T) {
name: "var value",
opt: ExecOptions{
Args: runtime.RawExtension{
- Raw: []byte(`{"var": "k"}`),
+ Raw: []byte(`{"var": ".k"}`),
},
Host: "local",
Variable: &testVariable{
@@ -60,7 +60,7 @@ func TestDebug(t *testing.T) {
name: "msg value",
opt: ExecOptions{
Args: runtime.RawExtension{
- Raw: []byte(`{"msg": "{{ k }}"}`),
+ Raw: []byte(`{"msg": "{{ .k }}"}`),
},
Host: "local",
Variable: &testVariable{
diff --git a/pkg/modules/gen_cert_test.go b/pkg/modules/gen_cert_test.go
index 8359e18a9..eb27859d7 100644
--- a/pkg/modules/gen_cert_test.go
+++ b/pkg/modules/gen_cert_test.go
@@ -37,15 +37,19 @@ func TestModuleGenCert(t *testing.T) {
opt: ExecOptions{
Args: runtime.RawExtension{
Raw: []byte(`{
-"policy": "IfNotPresent",
-"sans": ["localhost"],
-"cn": "test",
+"policy": "{{- .policy -}}\n",
+"sans": "[\"localhost\"]",
+"cn": "test",
"out_key": "./test_gen_cert/test-key.pem",
"out_cert": "./test_gen_cert/test-crt.pem"
- }`),
+}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "policy": "IfNotPresent",
+ },
},
- Host: "local",
- Variable: &testVariable{},
},
exceptStdout: "success",
},
diff --git a/pkg/modules/image.go b/pkg/modules/image.go
index b188b7f5c..56d48dbc0 100644
--- a/pkg/modules/image.go
+++ b/pkg/modules/image.go
@@ -29,6 +29,7 @@ import (
"strings"
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "k8s.io/klog/v2"
"oras.land/oras-go/v2"
"oras.land/oras-go/v2/registry"
"oras.land/oras-go/v2/registry/remote"
@@ -111,6 +112,11 @@ func ModuleImage(ctx context.Context, options ExecOptions) (stdout string, stder
}
func findLocalImageManifests(localDir string) ([]string, error) {
+ if _, err := os.Stat(localDir); err != nil {
+ // image directory does not exist; skip
+ klog.V(4).ErrorS(err, "failed to stat local directory")
+ return nil, nil
+ }
var manifests []string
if err := filepath.WalkDir(localDir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
diff --git a/pkg/modules/module.go b/pkg/modules/module.go
index 469cd8c64..d2dee9211 100644
--- a/pkg/modules/module.go
+++ b/pkg/modules/module.go
@@ -27,6 +27,7 @@ import (
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
kubekeyv1alpha1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1alpha1"
"github.com/kubesphere/kubekey/v4/pkg/connector"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/variable"
)
@@ -93,7 +94,13 @@ func getConnector(ctx context.Context, host string, data map[string]any) (connec
if v := ctx.Value(ConnKey); v != nil {
conn = v.(connector.Connector)
} else {
- conn, err = connector.NewConnector(host, data)
+ connectorVars := make(map[string]any)
+ if c1, ok := data[_const.VariableConnector]; ok {
+ if c2, ok := c1.(map[string]any); ok {
+ connectorVars = c2
+ }
+ }
+ conn, err = connector.NewConnector(host, connectorVars)
if err != nil {
return conn, err
}
diff --git a/pkg/modules/template.go b/pkg/modules/template.go
index c89f2df5e..d0f15ae5e 100644
--- a/pkg/modules/template.go
+++ b/pkg/modules/template.go
@@ -84,7 +84,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return fmt.Errorf("read file error: %w", err)
}
- result, err := tmpl.ParseFile(ha.(map[string]any), data)
+ result, err := tmpl.ParseString(ha.(map[string]any), string(data))
if err != nil {
return fmt.Errorf("parse file error: %w", err)
}
@@ -109,7 +109,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return "", fmt.Sprintf("read file error: %v", err)
}
- result, err := tmpl.ParseFile(ha.(map[string]any), data)
+ result, err := tmpl.ParseString(ha.(map[string]any), string(data))
if err != nil {
return "", fmt.Sprintf("parse file error: %v", err)
}
@@ -155,7 +155,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return fmt.Errorf("read file error: %w", err)
}
- result, err := tmpl.ParseFile(ha.(map[string]any), data)
+ result, err := tmpl.ParseString(ha.(map[string]any), string(data))
if err != nil {
return fmt.Errorf("parse file error: %w", err)
}
@@ -179,7 +179,7 @@ func ModuleTemplate(ctx context.Context, options ExecOptions) (string, string) {
if err != nil {
return "", fmt.Sprintf("read file error: %v", err)
}
- result, err := tmpl.ParseFile(ha.(map[string]any), data)
+ result, err := tmpl.ParseString(ha.(map[string]any), string(data))
if err != nil {
return "", fmt.Sprintf("parse file error: %v", err)
}
diff --git a/pkg/modules/template_test.go b/pkg/modules/template_test.go
index 4f1f9b6c8..9b7d1d320 100644
--- a/pkg/modules/template_test.go
+++ b/pkg/modules/template_test.go
@@ -56,10 +56,16 @@ func TestTemplate(t *testing.T) {
name: "dest is empty",
opt: ExecOptions{
Args: runtime.RawExtension{
- Raw: []byte(fmt.Sprintf(`{"src": %s}`, absPath)),
+ Raw: []byte(`{
+"src": "{{ .absPath }}"
+}`),
+ },
+ Host: "local",
+ Variable: &testVariable{
+ value: map[string]any{
+ "absPath": absPath,
+ },
},
- Host: "local",
- Variable: &testVariable{},
},
ctxFunc: context.Background,
exceptStderr: "\"dest\" should be string",
diff --git a/pkg/project/helper.go b/pkg/project/helper.go
index 7a2e01440..c376133ee 100644
--- a/pkg/project/helper.go
+++ b/pkg/project/helper.go
@@ -67,35 +67,35 @@ func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
if p.ImportPlaybook != "" {
importPlaybook := getPlaybookBaseFromPlaybook(baseFS, pbPath, p.ImportPlaybook)
if importPlaybook == "" {
- return fmt.Errorf("cannot found import playbook %s", p.ImportPlaybook)
+ return fmt.Errorf("import playbook %s failed", p.ImportPlaybook)
}
if err := loadPlaybook(baseFS, importPlaybook, pb); err != nil {
- return err
+ return fmt.Errorf("load playbook failed: %w", err)
}
}
// load var_files (optional)
for _, file := range p.VarsFiles {
if _, err := fs.Stat(baseFS, filepath.Join(filepath.Dir(pbPath), file)); err != nil {
- return fmt.Errorf("cannot stat variables file %s", file)
+ return fmt.Errorf("file %s not exists", file)
}
mainData, err := fs.ReadFile(baseFS, filepath.Join(filepath.Dir(pbPath), file))
if err != nil {
- return fmt.Errorf("cannot read variables file %s", filepath.Join(filepath.Dir(pbPath), file))
+ return fmt.Errorf("read file %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
}
var vars map[string]any
var node yaml.Node // marshal file on defined order
if err := yaml.Unmarshal(mainData, &node); err != nil {
- return fmt.Errorf("cannot unmarshal variables file %s", filepath.Join(filepath.Dir(pbPath), file))
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
}
if err := node.Decode(&vars); err != nil {
- return fmt.Errorf("cannot unmarshal variables file %s", filepath.Join(filepath.Dir(pbPath), file))
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
}
p.Vars, err = combineMaps(p.Vars, vars)
if err != nil {
- return fmt.Errorf("cannot combine variables file %s", filepath.Join(filepath.Dir(pbPath), file))
+ return fmt.Errorf("combine maps file:%s failed: %w", filepath.Join(filepath.Dir(pbPath), file), err)
}
}
@@ -112,11 +112,11 @@ func loadPlaybook(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
rdata, err := fs.ReadFile(baseFS, mainTask)
if err != nil {
- return fmt.Errorf("cannot read role %s", r.Role)
+ return fmt.Errorf("read file %s failed: %w", mainTask, err)
}
var blocks []kkcorev1.Block
if err := yaml.Unmarshal(rdata, &blocks); err != nil {
- return fmt.Errorf("cannot unmarshal role %s", r.Role)
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
}
p.Roles[i].Block = blocks
}
@@ -143,11 +143,11 @@ func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
rdata, err := fs.ReadFile(baseFS, mainTask)
if err != nil {
- return fmt.Errorf("cannot read role %s", r.Role)
+ return fmt.Errorf("read file %s failed: %w", mainTask, err)
}
var blocks []kkcorev1.Block
if err := yaml.Unmarshal(rdata, &blocks); err != nil {
- return fmt.Errorf("cannot unmarshal role %s", r.Role)
+ return fmt.Errorf("unmarshal yaml file: %s failed: %w", filepath.Join(filepath.Dir(pbPath), mainTask), err)
}
p.Roles[i].Block = blocks
@@ -156,21 +156,21 @@ func convertRoles(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) error {
if mainDefault != "" {
mainData, err := fs.ReadFile(baseFS, mainDefault)
if err != nil {
- return fmt.Errorf("cannot read defaults variable for Role %s", r.Role)
+ return fmt.Errorf("read defaults variable file %s failed: %w", mainDefault, err)
}
var vars map[string]any
var node yaml.Node // marshal file on defined order
if err := yaml.Unmarshal(mainData, &node); err != nil {
- return fmt.Errorf("cannot unmarshal defaults variable for Role %s", r.Role)
+ return fmt.Errorf("unmarshal defaults variable yaml file: %s failed: %w", mainDefault, err)
}
if err := node.Decode(&vars); err != nil {
- return fmt.Errorf("cannot unmarshal defaults variable for Role %s", r.Role)
+ return fmt.Errorf("decode defaults variable yaml file: %s failed: %w", mainDefault, err)
}
p.Roles[i].Vars, err = combineMaps(p.Roles[i].Vars, vars)
if err != nil {
- return fmt.Errorf("cannot combine defaults variable for Role %s", r.Role)
+ return fmt.Errorf("combine defaults variable failed: %w", err)
}
}
}
@@ -184,19 +184,19 @@ func convertIncludeTasks(baseFS fs.FS, pbPath string, pb *kkcorev1.Playbook) err
var pbBase = filepath.Dir(filepath.Dir(pbPath))
for _, play := range pb.Play {
if err := fileToBlock(baseFS, pbBase, play.PreTasks); err != nil {
- return fmt.Errorf("cannot convert pre_tasks file %s", pbPath)
+ return fmt.Errorf("convert pre_tasks file %s failed: %w", pbPath, err)
}
if err := fileToBlock(baseFS, pbBase, play.Tasks); err != nil {
- return fmt.Errorf("cannot convert tasks file %s", pbPath)
+ return fmt.Errorf("convert tasks file %s failed: %w", pbPath, err)
}
if err := fileToBlock(baseFS, pbBase, play.PostTasks); err != nil {
- return fmt.Errorf("cannot convert post_tasks file %s", pbPath)
+ return fmt.Errorf("convert post_tasks file %s failed: %w", pbPath, err)
}
for _, r := range play.Roles {
roleBase := getRoleBaseFromPlaybook(baseFS, pbPath, r.Role)
if err := fileToBlock(baseFS, filepath.Join(roleBase, _const.ProjectRolesTasksDir), r.Block); err != nil {
- return fmt.Errorf("cannot convert role %s", r.Role)
+ return fmt.Errorf("convert role %s failed: %w", filepath.Join(pbPath, r.Role), err)
}
}
}
@@ -208,23 +208,23 @@ func fileToBlock(baseFS fs.FS, baseDir string, blocks []kkcorev1.Block) error {
if b.IncludeTasks != "" {
data, err := fs.ReadFile(baseFS, filepath.Join(baseDir, b.IncludeTasks))
if err != nil {
- return fmt.Errorf("cannot read includeTask file %s", filepath.Join(baseDir, b.IncludeTasks))
+ return fmt.Errorf("read includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
}
var bs []kkcorev1.Block
if err := yaml.Unmarshal(data, &bs); err != nil {
- return fmt.Errorf("cannot unmarshal includeTask file %s", filepath.Join(baseDir, b.IncludeTasks))
+ return fmt.Errorf("unmarshal includeTask file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
}
b.Block = bs
blocks[i] = b
}
if err := fileToBlock(baseFS, baseDir, b.Block); err != nil {
- return fmt.Errorf("cannot convert block file %s", filepath.Join(baseDir, b.IncludeTasks))
+ return fmt.Errorf("convert block file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
}
if err := fileToBlock(baseFS, baseDir, b.Rescue); err != nil {
- return fmt.Errorf("cannot convert rescue file %s", filepath.Join(baseDir, b.IncludeTasks))
+ return fmt.Errorf("convert rescue file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
}
if err := fileToBlock(baseFS, baseDir, b.Always); err != nil {
- return fmt.Errorf("cannot convert always file %s", filepath.Join(baseDir, b.IncludeTasks))
+ return fmt.Errorf("convert always file %s failed: %w", filepath.Join(baseDir, b.IncludeTasks), err)
}
}
return nil
diff --git a/pkg/variable/helper.go b/pkg/variable/helper.go
index 7fc60c806..56e18b42d 100644
--- a/pkg/variable/helper.go
+++ b/pkg/variable/helper.go
@@ -17,18 +17,19 @@ limitations under the License.
package variable
import (
- "encoding/json"
"fmt"
+ "net"
"reflect"
- "regexp"
+ "slices"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
"k8s.io/klog/v2"
- "sigs.k8s.io/yaml"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
+ _const "github.com/kubesphere/kubekey/v4/pkg/const"
"github.com/kubesphere/kubekey/v4/pkg/converter/tmpl"
)
@@ -62,17 +63,22 @@ func combineVariables(v1, v2 map[string]any) map[string]any {
func convertGroup(inv kubekeyv1.Inventory) map[string]any {
groups := make(map[string]any)
- all := []string{"localhost"} // set default host
+ all := make([]string, 0)
for hn := range inv.Spec.Hosts {
all = append(all, hn)
}
- groups["all"] = all
+ if !slices.Contains(all, _const.VariableLocalHost) { // set default localhost
+ all = append(all, _const.VariableLocalHost)
+ }
+ groups[_const.VariableGroupsAll] = all
for gn := range inv.Spec.Groups {
groups[gn] = hostsInGroup(inv, gn)
}
return groups
}
+// hostsInGroup get a host_name slice in a given group
+// if the given group contains other group. convert other group to host_name slice.
func hostsInGroup(inv kubekeyv1.Inventory, groupName string) []string {
if v, ok := inv.Spec.Groups[groupName]; ok {
var hosts []string
@@ -108,15 +114,94 @@ func mergeSlice(g1, g2 []string) []string {
return mg
}
+// parseVariable parse all string values to the actual value.
+func parseVariable(v any, parseTmplFunc func(string) (string, error)) error {
+ switch reflect.ValueOf(v).Kind() {
+ case reflect.Map:
+ for _, kv := range reflect.ValueOf(v).MapKeys() {
+ val := reflect.ValueOf(v).MapIndex(kv)
+ if vv, ok := val.Interface().(string); ok {
+ if tmpl.IsTmplSyntax(vv) {
+ newValue, err := parseTmplFunc(vv)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.EqualFold(newValue, "TRUE"):
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(true))
+ case strings.EqualFold(newValue, "FALSE"):
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(false))
+ default:
+ reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue))
+ }
+ }
+ } else {
+ if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
+ return err
+ }
+ }
+ }
+ case reflect.Slice, reflect.Array:
+ for i := 0; i < reflect.ValueOf(v).Len(); i++ {
+ val := reflect.ValueOf(v).Index(i)
+ if vv, ok := val.Interface().(string); ok {
+ if tmpl.IsTmplSyntax(vv) {
+ newValue, err := parseTmplFunc(vv)
+ if err != nil {
+ return err
+ }
+ switch {
+ case strings.EqualFold(newValue, "TRUE"):
+
+ val.Set(reflect.ValueOf(true))
+ case strings.EqualFold(newValue, "FALSE"):
+ val.Set(reflect.ValueOf(false))
+ default:
+ val.Set(reflect.ValueOf(newValue))
+ }
+ }
+ } else {
+ if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// getLocalIP get the ipv4 or ipv6 for localhost machine
+func getLocalIP(ipType string) string {
+ addrs, err := net.InterfaceAddrs()
+ if err != nil {
+ klog.ErrorS(err, "get network address error")
+ }
+ for _, addr := range addrs {
+ if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() {
+ if ipType == _const.VariableIPv4 && ipNet.IP.To4() != nil {
+ return ipNet.IP.String()
+ }
+ if ipType == _const.VariableIPv6 && ipNet.IP.To4() == nil && ipNet.IP.To16() != nil {
+ return ipNet.IP.String()
+ }
+ }
+ }
+ klog.V(4).Infof("connot get local %s address", ipType)
+ return ""
+}
+
// StringVar get string value by key
func StringVar(d map[string]any, args map[string]any, key string) (string, error) {
val, ok := args[key]
if !ok {
+ klog.V(4).ErrorS(nil, "cannot find variable", "key", key)
return "", fmt.Errorf("cannot find variable \"%s\"", key)
}
-
+ // convert to string
sv, ok := val.(string)
if !ok {
+ klog.V(4).ErrorS(nil, "variable is not string", "key", key)
return "", fmt.Errorf("variable \"%s\" is not string", key)
}
return tmpl.ParseString(d, sv)
@@ -126,6 +211,7 @@ func StringVar(d map[string]any, args map[string]any, key string) (string, error
func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string, error) {
val, ok := vars[key]
if !ok {
+ klog.V(4).ErrorS(nil, "cannot find variable", "key", key)
return nil, fmt.Errorf("cannot find variable \"%s\"", key)
}
switch valv := val.(type) {
@@ -147,34 +233,17 @@ func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string
case string:
as, err := tmpl.ParseString(d, valv)
if err != nil {
+ klog.V(4).ErrorS(err, "parse variable error", "key", key)
return nil, err
}
var ss []string
- switch {
- case regexp.MustCompile(`^<\[\](.*?) Value>$`).MatchString(as):
- // in pongo2 cannot get slice value. add extension filter value.
- var input = val.(string)
- // try to escape string
- if ns, err := strconv.Unquote(valv); err == nil {
- input = ns
- }
- vv := GetValue(d, input)
- if _, ok := vv.([]any); ok {
- ss = make([]string, len(vv.([]any)))
- for i, a := range vv.([]any) {
- ss[i] = a.(string)
- }
- }
- default:
- // value is simple string
- if err := json.Unmarshal([]byte(as), &ss); err != nil {
- // if is not json format. only return a value contains this
- return []string{as}, nil //nolint:nilerr
- }
+ if err := json.Unmarshal([]byte(as), &ss); err == nil {
+ return ss, nil
}
- return ss, nil
+ return []string{as}, nil
default:
- return nil, fmt.Errorf("unsupport variable \"%s\" type", key)
+ klog.V(4).ErrorS(nil, "unsupported variable type", "key", key)
+ return nil, fmt.Errorf("unsupported variable \"%s\" type", key)
}
}
@@ -182,31 +251,39 @@ func StringSliceVar(d map[string]any, vars map[string]any, key string) ([]string
func IntVar(d map[string]any, vars map[string]any, key string) (int, error) {
val, ok := vars[key]
if !ok {
+ klog.V(4).ErrorS(nil, "cannot find variable", "key", key)
return 0, fmt.Errorf("cannot find variable \"%s\"", key)
}
- // default convert to float64
- switch valv := val.(type) {
- case float64:
- return int(valv), nil
- case string:
- vs, err := tmpl.ParseString(d, valv)
+ // default convert to int
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return int(v.Int()), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int(v.Uint()), nil
+ case reflect.Float32, reflect.Float64:
+ return int(v.Float()), nil
+ case reflect.String:
+ vs, err := tmpl.ParseString(d, v.String())
if err != nil {
+ klog.V(4).ErrorS(err, "parse string variable error", "key", key)
return 0, err
}
return strconv.Atoi(vs)
default:
- return 0, fmt.Errorf("unsupport variable \"%s\" type", key)
+ klog.V(4).ErrorS(nil, "unsupported variable type", "key", key)
+ return 0, fmt.Errorf("unsupported variable \"%s\" type", key)
}
}
// Extension2Variables convert extension to variables
func Extension2Variables(ext runtime.RawExtension) map[string]any {
if len(ext.Raw) == 0 {
- return nil
+ return make(map[string]any)
}
var data map[string]any
- if err := yaml.Unmarshal(ext.Raw, &data); err != nil {
+ if err := json.Unmarshal(ext.Raw, &data); err != nil {
klog.V(4).ErrorS(err, "failed to unmarshal extension to variables")
}
return data
@@ -219,33 +296,19 @@ func Extension2Slice(d map[string]any, ext runtime.RawExtension) []any {
}
var data []any
- if err := yaml.Unmarshal(ext.Raw, &data); err == nil {
+ // try parse yaml string which defined by single value or multi value
+ if err := json.Unmarshal(ext.Raw, &data); err == nil {
return data
}
-
+ // try converter template string
val, err := Extension2String(d, ext)
if err != nil {
klog.ErrorS(err, "extension2string error", "input", string(ext.Raw))
}
- // parse value by pongo2. if
- switch {
- case regexp.MustCompile(`^<\[\](.*?) Value>$`).MatchString(val):
- // in pongo2 cannot get slice value. add extension filter value.
- var input = string(ext.Raw)
- // try to escape string
- if ns, err := strconv.Unquote(string(ext.Raw)); err == nil {
- input = ns
- }
- vv := GetValue(d, input)
- if _, ok := vv.([]any); ok {
- return vv.([]any)
- }
- default:
- // value is simple string
- return []any{val}
+ if err := json.Unmarshal([]byte(val), &data); err == nil {
+ return data
}
-
- return data
+ return []any{val}
}
func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error) {
@@ -265,75 +328,3 @@ func Extension2String(d map[string]any, ext runtime.RawExtension) (string, error
return result, nil
}
-
-// GetValue from VariableData by key path
-func GetValue(value map[string]any, keys string) any {
- switch {
- case strings.HasPrefix(keys, "{{") && strings.HasSuffix(keys, "}}"):
- // the keys like {{ a.b.c }}. return value[a][b][c]
- var result any = value
- for _, k := range strings.Split(strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(keys, "{{"), "}}")), ".") {
- result = result.(map[string]any)[k]
- }
- return result
- default:
- return nil
- }
-}
-
-// parseVariable parse all string values to the actual value.
-func parseVariable(v any, parseTmplFunc func(string) (string, error)) error {
- switch reflect.ValueOf(v).Kind() {
- case reflect.Map:
- for _, kv := range reflect.ValueOf(v).MapKeys() {
- val := reflect.ValueOf(v).MapIndex(kv)
- if vv, ok := val.Interface().(string); ok {
- if tmpl.IsTmplSyntax(vv) {
- newValue, err := parseTmplFunc(vv)
- if err != nil {
- return err
- }
- switch {
- case strings.EqualFold(newValue, "TRUE"):
- reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(true))
- case strings.EqualFold(newValue, "FALSE"):
- reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(false))
- default:
- reflect.ValueOf(v).SetMapIndex(kv, reflect.ValueOf(newValue))
- }
- }
- } else {
- if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
- return err
- }
- }
- }
- case reflect.Slice, reflect.Array:
- for i := 0; i < reflect.ValueOf(v).Len(); i++ {
- val := reflect.ValueOf(v).Index(i)
- if vv, ok := val.Interface().(string); ok {
- if tmpl.IsTmplSyntax(vv) {
- newValue, err := parseTmplFunc(vv)
- if err != nil {
- return err
- }
- switch {
- case strings.EqualFold(newValue, "TRUE"):
-
- val.Set(reflect.ValueOf(true))
- case strings.EqualFold(newValue, "FALSE"):
- val.Set(reflect.ValueOf(false))
- default:
- val.Set(reflect.ValueOf(newValue))
- }
- }
- } else {
- if err := parseVariable(val.Interface(), parseTmplFunc); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
diff --git a/pkg/variable/helper_test.go b/pkg/variable/helper_test.go
index 770d90810..d0f2cc14e 100644
--- a/pkg/variable/helper_test.go
+++ b/pkg/variable/helper_test.go
@@ -194,7 +194,7 @@ func TestParseVariable(t *testing.T) {
{
name: "parse string",
data: map[string]any{
- "a": "{{ a }}",
+ "a": "{{ .a }}",
},
base: map[string]any{
"a": "b",
@@ -206,7 +206,7 @@ func TestParseVariable(t *testing.T) {
{
name: "parse map",
data: map[string]any{
- "a": "{{ a.b }}",
+ "a": "{{ .a.b }}",
},
base: map[string]any{
"a": map[string]any{
@@ -220,7 +220,7 @@ func TestParseVariable(t *testing.T) {
{
name: "parse slice",
data: map[string]any{
- "a": []string{"{{ b }}"},
+ "a": []string{"{{ .b }}"},
},
base: map[string]any{
"b": "c",
@@ -234,7 +234,7 @@ func TestParseVariable(t *testing.T) {
data: map[string]any{
"a": []map[string]any{
{
- "a1": []any{"{{ b }}"},
+ "a1": []any{"{{ .b }}"},
},
},
},
@@ -252,7 +252,7 @@ func TestParseVariable(t *testing.T) {
{
name: "parse slice with bool value",
data: map[string]any{
- "a": []any{"{{ b }}"},
+ "a": []any{"{{ .b }}"},
},
base: map[string]any{
"b": "true",
@@ -264,7 +264,7 @@ func TestParseVariable(t *testing.T) {
{
name: "parse map with bool value",
data: map[string]any{
- "a": "{{ b }}",
+ "a": "{{ .b }}",
},
base: map[string]any{
"b": "true",
diff --git a/pkg/variable/internal.go b/pkg/variable/internal.go
index 1f831b497..be2e15f23 100644
--- a/pkg/variable/internal.go
+++ b/pkg/variable/internal.go
@@ -21,13 +21,13 @@ import (
"fmt"
"reflect"
"regexp"
+ "slices"
"strconv"
"strings"
"sync"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/klog/v2"
- "k8s.io/utils/strings/slices"
kubekeyv1 "github.com/kubesphere/kubekey/v4/pkg/apis/kubekey/v1"
_const "github.com/kubesphere/kubekey/v4/pkg/const"
@@ -75,15 +75,23 @@ func (v value) getParameterVariable() map[string]any {
hostVars := Extension2Variables(v.Inventory.Spec.Hosts[hostname])
// set inventory_name to hostVars
// "inventory_name" is the hostname configured in the inventory file.
- hostVars = combineVariables(hostVars, map[string]any{
- _const.VariableHostName: hostname,
- })
+ hostVars[_const.VariableHostName] = hostname
// merge group vars to host vars
for _, gv := range v.Inventory.Spec.Groups {
if slices.Contains(gv.Hosts, hostname) {
hostVars = combineVariables(hostVars, Extension2Variables(gv.Vars))
}
}
+ // set default localhost
+ if hostname == _const.VariableLocalHost {
+ if _, ok := hostVars[_const.VariableIPv4]; !ok {
+ hostVars[_const.VariableIPv4] = getLocalIP(_const.VariableIPv4)
+ }
+ if _, ok := hostVars[_const.VariableIPv6]; !ok {
+ hostVars[_const.VariableIPv6] = getLocalIP(_const.VariableIPv6)
+ }
+ }
+
// merge inventory vars to host vars
hostVars = combineVariables(hostVars, Extension2Variables(v.Inventory.Spec.Vars))
// merge config vars to host vars
@@ -307,6 +315,7 @@ var MergeAllRuntimeVariable = func(hostName string, vd map[string]any) MergeFunc
}
}
+// GetAllVariable get all variable for a given host
var GetAllVariable = func(hostName string) GetFunc {
return func(v Variable) (any, error) {
if _, ok := v.(*variable); !ok {
@@ -326,3 +335,18 @@ var GetAllVariable = func(hostName string) GetFunc {
return result, nil
}
}
+
+// GetHostMaxLength get the max length for all hosts
+var GetHostMaxLength = func() GetFunc {
+ return func(v Variable) (any, error) {
+ if _, ok := v.(*variable); !ok {
+ return nil, fmt.Errorf("variable type error")
+ }
+ data := v.(*variable).value
+ var hostNameMaxLen int
+ for k := range data.Hosts {
+ hostNameMaxLen = max(len(k), hostNameMaxLen)
+ }
+ return hostNameMaxLen, nil
+ }
+}
diff --git a/pkg/variable/internal_test.go b/pkg/variable/internal_test.go
index 2c6263c82..31bc909b1 100644
--- a/pkg/variable/internal_test.go
+++ b/pkg/variable/internal_test.go
@@ -36,49 +36,44 @@ func TestGetAllVariable(t *testing.T) {
value: value{
Config: kubekeyv1.Config{
Spec: runtime.RawExtension{
- Raw: []byte(`
-artifact:
- images:
- - abc
-`),
- },
+ Raw: []byte(`{
+"artifact": {
+ "images": [ "abc" ]
+}
+}`)},
},
- Inventory: kubekeyv1.Inventory{},
- Hosts: map[string]host{
- "test": {
- RuntimeVars: map[string]any{
- "artifact": map[string]any{
- "k1": "v1",
- "k2": 2,
- "k3": true,
- "k4": map[string]any{
- "k41": "v41",
- },
- },
+ Inventory: kubekeyv1.Inventory{
+ Spec: kubekeyv1.InventorySpec{
+ Hosts: map[string]runtime.RawExtension{
+ "localhost": {Raw: []byte(`{
+"internal_ipv4": "127.0.0.1",
+"internal_ipv6": "::1"
+}`)},
},
},
},
+ Hosts: map[string]host{
+ "localhost": {},
+ },
},
except: map[string]any{
+ "internal_ipv4": "127.0.0.1",
+ "internal_ipv6": "::1",
"artifact": map[string]any{
- "k1": "v1",
- "k2": 2,
- "k3": true,
- "k4": map[string]any{
- "k41": "v41",
- },
"images": []any{"abc"},
},
"groups": map[string]interface{}{"all": []string{"localhost"}},
"inventory_hosts": map[string]interface{}{
- "test": map[string]interface{}{
+ "localhost": map[string]interface{}{
+ "internal_ipv4": "127.0.0.1",
+ "internal_ipv6": "::1",
"artifact": map[string]interface{}{
"images": []interface{}{"abc"},
},
- "inventory_name": "test",
+ "inventory_name": "localhost",
},
},
- "inventory_name": "test",
+ "inventory_name": "localhost",
},
},
}
@@ -86,7 +81,7 @@ artifact:
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
v := variable{value: &tc.value}
- result, err := v.Get(GetAllVariable("test"))
+ result, err := v.Get(GetAllVariable("localhost"))
if err != nil {
t.Fatal(err)
}
diff --git a/plugins/roles/etcd/backup/tasks/main.yaml b/plugins/roles/etcd/backup/tasks/main.yaml
index 273fe6b49..a5a3bd714 100644
--- a/plugins/roles/etcd/backup/tasks/main.yaml
+++ b/plugins/roles/etcd/backup/tasks/main.yaml
@@ -7,9 +7,9 @@
export $(cat /etc/etcd.env | grep ETCDCTL_CACERT)
export $(cat /etc/etcd.env | grep ETCDCTL_CERT)
export $(cat /etc/etcd.env | grep ETCDCTL_KEY)
- ETCDCTL_API=3 etcdctl --endpoints=https://{{ internal_ipv4 }}:2379 snapshot save /tmp/kubekey/etcd/snapshot.db
+ ETCDCTL_API=3 etcdctl --endpoints=https://{{ .internal_ipv4 }}:2379 snapshot save /tmp/kubekey/etcd/snapshot.db
- name: Fetch backup to local
fetch:
src: /tmp/kubekey/etcd/snapshot.db
- dest: "{{ work_dir }}/kubekey/etcd/snapshot.db"
+ dest: "{{ .work_dir }}/kubekey/etcd/snapshot.db"
diff --git a/plugins/roles/etcd/restore/tasks/main.yaml b/plugins/roles/etcd/restore/tasks/main.yaml
index 2947ce106..654b114c6 100644
--- a/plugins/roles/etcd/restore/tasks/main.yaml
+++ b/plugins/roles/etcd/restore/tasks/main.yaml
@@ -1,7 +1,7 @@
---
- name: Sync etcd snapshot to remote
copy:
- src: "{{ work_dir }}/kubekey/etcd/snapshot.db"
+ src: "{{ .work_dir }}/kubekey/etcd/snapshot.db"
dest: /tmp/kubekey/etcd/snapshot.db
- name: Stop etcd
@@ -17,11 +17,15 @@
export $(cat /etc/etcd.env | grep ETCDCTL_CERT)
export $(cat /etc/etcd.env | grep ETCDCTL_KEY)
etcdctl snapshot restore /tmp/kubekey/etcd/snapshot.db \
- --name={{ inventory_name }} --endpoints=https://{{ internal_ipv4 }}:2379 \
- --initial-cluster={% for h in groups['etcd'] %}{% set hv=inventory_hosts[h] %}{{ hv.inventory_name }}={{ hv.internal_ipv4|stringformat:"https://%s:2380" }}{% if (not forloop.Last) %},{% endif %}{% endfor %} \
- --initial-advertise-peer-urls=https://{{ internal_ipv4 }}:2380\
- --initial-cluster-token={{ etcd.env.token }} \
- --data-dir={{ etcd.env.data_dir }}
+ --name={{ .inventory_name }} --endpoints=https://{{ .internal_ipv4 }}:2379 \
+ {{- $ips := list -}}
+ {{- range $element := .groups.etcd -}}
+ {{- $ips = append $ips (printf "%s=https://%s:2380" (index $.inventory_hosts $element "inventory_name") (index $.inventory_hosts $element "internal_ipv4")) -}}
+ {{- end -}}
+ --initial-cluster={{ $ips | join "," }} \
+ --initial-advertise-peer-urls=https://{{ .internal_ipv4 }}:2380\
+ --initial-cluster-token={{ .etcd.env.token }} \
+ --data-dir={{ .etcd.env.data_dir }}
- name: Start etcd
command: systemctl start etcd
diff --git a/plugins/roles/kubernetes/start/tasks/main.yaml b/plugins/roles/kubernetes/start/tasks/main.yaml
index 80766a4b4..498142585 100644
--- a/plugins/roles/kubernetes/start/tasks/main.yaml
+++ b/plugins/roles/kubernetes/start/tasks/main.yaml
@@ -4,11 +4,11 @@
- name: Stop docker in kubernetes
command: |
systemctl start docker
- when: cri.container_manager == 'docker'
+ when: .cri.container_manager | eq "docker"
- name: Start containerd in kubernetes
command: |
systemctl start containerd
- when: cri.container_manager == 'containerd'
+ when: .cri.container_manager | eq "containerd"
- name: Start kubelet in kubernetes
command: systemctl start kubelet
diff --git a/plugins/roles/kubernetes/stop/tasks/main.yaml b/plugins/roles/kubernetes/stop/tasks/main.yaml
index 0813d8f99..91a06755b 100644
--- a/plugins/roles/kubernetes/stop/tasks/main.yaml
+++ b/plugins/roles/kubernetes/stop/tasks/main.yaml
@@ -7,8 +7,8 @@
- name: Stop docker in kubernetes
command: |
systemctl stop docker
- when: cri.container_manager == 'docker'
+ when: .cri.container_manager | eq "docker"
- name: Stop containerd in kubernetes
command: |
systemctl stop containerd
- when: cri.container_manager == 'containerd'
+ when: .cri.container_manager | eq "containerd"
diff --git a/plugins/roles/sonobuoy/defaults/main.yaml b/plugins/roles/sonobuoy/defaults/main.yaml
index 83517f2ea..d3cf5d0ff 100644
--- a/plugins/roles/sonobuoy/defaults/main.yaml
+++ b/plugins/roles/sonobuoy/defaults/main.yaml
@@ -2,9 +2,17 @@ sonobuoy_version: v0.57.1
work_dir: /kubekey
sonobuoy:
amd64: |
- {% if (kkzone == "cn") %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_amd64.tar.gz{% else %}https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_amd64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" }}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_amd64.tar.gz
+ {{- else }}
+ https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_amd64.tar.gz
+ {{- end }}
arm64: |
- {% if (kkzone == 'cn') %}https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_arm64.tar.gz{% else %}https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version|slice:'1:' }}_linux_arm64.tar.gz{% endif %}
+ {{- if .kkzone | eq "cn" }}
+ https://kubernetes-release.pek3b.qingstor.com/etcd/release/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_arm64.tar.gz
+ {{- else }}
+ https://github.com/vmware-tanzu/sonobuoy/releases/download/{{ .sonobuoy_version }}/sonobuoy_{{ .sonobuoy_version | trimPrefix "v" }}_linux_arm64.tar.gz
+ {{- end }}
plugins:
systemd_logs:
enabled: false
@@ -12,7 +20,7 @@ plugins:
enabled: false
e2e_ks:
enabled: false
- image: registry.cn-beijing.aliyuncs.com/kubesphereio/conformance:{{ kube_version }}
+ image: registry.cn-beijing.aliyuncs.com/kubesphereio/conformance:{{ .kube_version }}
kube_bench:
enabled: false
image: sonobuoy/kube-bench:v0.6.17
diff --git a/plugins/roles/sonobuoy/tasks/main.yaml b/plugins/roles/sonobuoy/tasks/main.yaml
index 1937b4d90..1164fe550 100644
--- a/plugins/roles/sonobuoy/tasks/main.yaml
+++ b/plugins/roles/sonobuoy/tasks/main.yaml
@@ -2,16 +2,16 @@
- name: Generate sonobuoy plugins
template:
src: plugins/
- dest: "sonobuoy/plugins/"
+ dest: sonobuoy/plugins/
- name: Run sonobuoy
command: |
# run and waiting
sonobuoy run --wait \
- {% if (plugins.systemd_logs.enabled) %}-p systemd-logs {% endif %}\
- {% if (plugins.e2e.enabled) %}-p e2e {% endif %}\
- {% if (plugins.e2e_ks.enabled) %}-p sonobuoy/plugins/e2e-ks.yaml {% endif %}\
- {% if (plugins.kube_bench.enabled) %}-p sonobuoy/plugins/kube-bench.yaml -p sonobuoy/plugins/kube-bench-master.yaml {% endif %}\
+ {{ if .plugins.systemd_logs.enabled }}-p systemd-logs {{ end }}\
+ {{ if .plugins.e2e.enabled }}-p e2e {{ end }}\
+ {{ if .plugins.e2e_ks.enabled }}-p sonobuoy/plugins/e2e-ks.yaml {{ end }}\
+ {{ if .plugins.kube_bench.enabled }}-p sonobuoy/plugins/kube-bench.yaml -p sonobuoy/plugins/kube-bench-master.yaml {{ end }}\
- name: Retrieve result
command: |
diff --git a/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml b/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
index d47670c41..27be18ec3 100644
--- a/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
+++ b/plugins/roles/sonobuoy/templates/plugins/e2e-ks.yaml
@@ -31,7 +31,7 @@ podSpec:
- name: RESULTS_DIR
value: /tmp/sonobuoy/results
- name: SONOBUOY_K8S_VERSION
- value: {{ kube_version }}
+ value: {{ .kube_version }}
- name: SONOBUOY_PROGRESS_PORT
value: "8099"
- name: SONOBUOY
@@ -40,7 +40,7 @@ podSpec:
value: /tmp/sonobuoy/config
- name: SONOBUOY_RESULTS_DIR
value: /tmp/sonobuoy/results
- image: {{ plugins.e2e_ks.image }}
+ image: {{ .plugins.e2e_ks.image }}
name: e2e-ks
volumeMounts:
- mountPath: /tmp/sonobuoy/results
diff --git a/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml b/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
index 5e11e9913..e454464c6 100644
--- a/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
+++ b/plugins/roles/sonobuoy/templates/plugins/kube-bench-master.yaml
@@ -46,7 +46,7 @@ spec:
- /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; sleep 3600; done
env:
- name: KUBERNETES_VERSION
- value: {{ kube_version }}
+ value: {{ .kube_version }}
- name: TARGET_MASTER
value: "true"
- name: TARGET_NODE
@@ -57,7 +57,7 @@ spec:
value: "false"
- name: TARGET_POLICIES
value: "false"
- image: {{ plugins.kube_bench.image }}
+ image: {{ .plugins.kube_bench.image }}
name: plugin
resources: {}
volumeMounts:
diff --git a/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml b/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
index 8ba1e0db9..9f232538f 100644
--- a/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
+++ b/plugins/roles/sonobuoy/templates/plugins/kube-bench.yaml
@@ -46,7 +46,7 @@ spec:
- /run-kube-bench.sh; while true; do echo "Sleeping for 1h to avoid daemonset restart"; /bin/sleep 3600; done
env:
- name: KUBERNETES_VERSION
- value: {{ kube_version }}
+ value: {{ .kube_version }}
- name: TARGET_MASTER
value: "false"
- name: TARGET_NODE
@@ -57,7 +57,7 @@ spec:
value: "false"
- name: TARGET_POLICIES
value: "false"
- image: {{ plugins.kube_bench.image }}
+ image: {{ .plugins.kube_bench.image }}
name: plugin
resources: {}
volumeMounts:
diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml
new file mode 100644
index 000000000..4025e01ec
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - 1.6
+ - 1.7
+ - 1.8
+ - tip
+
+script:
+ - go test -v
+
+notifications:
+ webhooks:
+ urls:
+ - https://webhooks.gitter.im/e/06e3328629952dabe3e0
+ on_success: change # options: [always|never|change] default: always
+ on_failure: always # options: [always|never|change] default: always
+ on_start: never # options: [always|never|change] default: always
diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
new file mode 100644
index 000000000..d700ec47f
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md
@@ -0,0 +1,8 @@
+# 1.0.1 (2017-05-31)
+
+## Fixed
+- #21: Fix generation of alphanumeric strings (thanks @dbarranco)
+
+# 1.0.0 (2014-04-30)
+
+- Initial release.
diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md
new file mode 100644
index 000000000..163ffe72a
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/README.md
@@ -0,0 +1,70 @@
+GoUtils
+===========
+[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html)
+[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils)
+
+
+GoUtils provides users with utility functions to manipulate strings in various ways. It is a Go implementation of some
+string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes:
+* WordUtils
+* RandomStringUtils
+* StringUtils (partial implementation)
+
+## Installation
+If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this:
+
+ go get github.com/Masterminds/goutils
+
+If you do not have Go set up on your system, please follow the [Go installation directions from the documenation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils.
+
+
+## Documentation
+GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils)
+
+
+## Usage
+The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file).
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/Masterminds/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 1: A goutils function which returns no errors
+ fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+ }
+Some functions return errors mainly due to illegal arguements used as parameters. The code example below illustrates how to deal with function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file).
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/Masterminds/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 2: A goutils function which returns an error
+ rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+ if err1 != nil {
+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+ } else {
+ fmt.Println(rand1)
+ }
+
+ }
+
+## License
+GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license.
+
+## Issue Reporting
+Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues
+
+## Website
+* [GoUtils webpage](http://Masterminds.github.io/goutils/)
diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml
new file mode 100644
index 000000000..657564a84
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/appveyor.yml
@@ -0,0 +1,21 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\goutils
+shallow_clone: true
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+build: off
+
+install:
+ - go version
+ - go env
+
+test_script:
+ - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
new file mode 100644
index 000000000..8dbd92485
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "crypto/rand"
+ "fmt"
+ "math"
+ "math/big"
+ "unicode"
+)
+
+/*
+CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNonAlphaNumeric(count int) (string, error) {
+ return CryptoRandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+CryptoRandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAscii(count int) (string, error) {
+ return CryptoRandom(count, 32, 127, false, false)
+}
+
+/*
+CryptoRandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomNumeric(count int) (string, error) {
+ return CryptoRandom(count, 0, 0, false, true)
+}
+
+/*
+CryptoRandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+ count - the length of random string to create
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphabetic(count int) (string, error) {
+ return CryptoRandom(count, 0, 0, true, false)
+}
+
+/*
+CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumeric(count int) (string, error) {
+ return CryptoRandom(count, 0, 0, true, true)
+}
+
+/*
+CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+ count - the length of random string to create
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...)
+*/
+func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+ return CryptoRandom(count, 0, 0, letters, numbers)
+}
+
+/*
+CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness.
+If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used,
+unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode int) to start at
+ end - the position in set of chars (ASCII/Unicode int) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+ string - the random string
+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+ if count == 0 {
+ return "", nil
+ } else if count < 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+ return "", err
+ }
+ if chars != nil && len(chars) == 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+ return "", err
+ }
+
+ if start == 0 && end == 0 {
+ if chars != nil {
+ end = len(chars)
+ } else {
+ if !letters && !numbers {
+ end = math.MaxInt32
+ } else {
+ end = 'z' + 1
+ start = ' '
+ }
+ }
+ } else {
+ if end <= start {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+ return "", err
+ }
+
+ if chars != nil && end > len(chars) {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+ return "", err
+ }
+ }
+
+ buffer := make([]rune, count)
+ gap := end - start
+
+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+ for count != 0 {
+ count--
+ var ch rune
+ if chars == nil {
+ ch = rune(getCryptoRandomInt(gap) + int64(start))
+ } else {
+ ch = chars[getCryptoRandomInt(gap)+int64(start)]
+ }
+
+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+ if ch >= 56320 && ch <= 57343 { // low surrogate range
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = ch
+ count--
+ // Insert high surrogate
+ buffer[count] = rune(55296 + getCryptoRandomInt(128))
+ }
+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = rune(56320 + getCryptoRandomInt(128))
+ count--
+ // Insert high surrogate
+ buffer[count] = ch
+ }
+ } else if ch >= 56192 && ch <= 56319 {
+ // private high surrogate, skip it
+ count++
+ } else {
+ // not one of the surrogates*
+ buffer[count] = ch
+ }
+ } else {
+ count++
+ }
+ }
+ return string(buffer), nil
+}
+
+func getCryptoRandomInt(count int) int64 {
+ nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count)))
+ if err != nil {
+ panic(err)
+ }
+ return nBig.Int64()
+}
diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go
new file mode 100644
index 000000000..272670231
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "time"
+ "unicode"
+)
+
+// RANDOM provides the time-based seed used to generate random numbers
+var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+/*
+RandomNonAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNonAlphaNumeric(count int) (string, error) {
+ return RandomAlphaNumericCustom(count, false, false)
+}
+
+/*
+RandomAscii creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive).
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAscii(count int) (string, error) {
+ return Random(count, 32, 127, false, false)
+}
+
+/*
+RandomNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomNumeric(count int) (string, error) {
+ return Random(count, 0, 0, false, true)
+}
+
+/*
+RandomAlphabetic creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alphabetic characters.
+
+Parameters:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphabetic(count int) (string, error) {
+ return Random(count, 0, 0, true, false)
+}
+
+/*
+RandomAlphaNumeric creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters.
+
+Parameter:
+ count - the length of random string to create
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumeric(count int) (string, error) {
+ return Random(count, 0, 0, true, true)
+}
+
+/*
+RandomAlphaNumericCustom creates a random string whose length is the number of characters specified.
+Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments.
+
+Parameters:
+ count - the length of random string to create
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) {
+ return Random(count, 0, 0, letters, numbers)
+}
+
+/*
+Random creates a random string based on a variety of options, using default source of randomness.
+This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but
+instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode int) to start at
+ end - the position in set of chars (ASCII/Unicode int) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+
+Returns:
+ string - the random string
+ error - an error stemming from an invalid parameter within underlying function, RandomSeed(...)
+*/
+func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) {
+ return RandomSeed(count, start, end, letters, numbers, chars, RANDOM)
+}
+
+/*
+RandomSeed creates a random string based on a variety of options, using supplied source of randomness.
+If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used,
+unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively.
+If chars is not nil, characters stored in chars that are between start and end are chosen.
+This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance
+with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably.
+
+Parameters:
+ count - the length of random string to create
+ start - the position in set of chars (ASCII/Unicode decimals) to start at
+ end - the position in set of chars (ASCII/Unicode decimals) to end before
+ letters - if true, generated string may include alphabetic characters
+ numbers - if true, generated string may include numeric characters
+ chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars.
+ random - a source of randomness.
+
+Returns:
+ string - the random string
+ error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars)
+*/
+func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) {
+
+ if count == 0 {
+ return "", nil
+ } else if count < 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...")
+ return "", err
+ }
+ if chars != nil && len(chars) == 0 {
+ err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty")
+ return "", err
+ }
+
+ if start == 0 && end == 0 {
+ if chars != nil {
+ end = len(chars)
+ } else {
+ if !letters && !numbers {
+ end = math.MaxInt32
+ } else {
+ end = 'z' + 1
+ start = ' '
+ }
+ }
+ } else {
+ if end <= start {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start)
+ return "", err
+ }
+
+ if chars != nil && end > len(chars) {
+ err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars))
+ return "", err
+ }
+ }
+
+ buffer := make([]rune, count)
+ gap := end - start
+
+ // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319
+ // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343
+
+ for count != 0 {
+ count--
+ var ch rune
+ if chars == nil {
+ ch = rune(random.Intn(gap) + start)
+ } else {
+ ch = chars[random.Intn(gap)+start]
+ }
+
+ if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers {
+ if ch >= 56320 && ch <= 57343 { // low surrogate range
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = ch
+ count--
+ // Insert high surrogate
+ buffer[count] = rune(55296 + random.Intn(128))
+ }
+ } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial)
+ if count == 0 {
+ count++
+ } else {
+ // Insert low surrogate
+ buffer[count] = rune(56320 + random.Intn(128))
+ count--
+ // Insert high surrogate
+ buffer[count] = ch
+ }
+ } else if ch >= 56192 && ch <= 56319 {
+ // private high surrogate, skip it
+ count++
+ } else {
+ // not one of the surrogates*
+ buffer[count] = ch
+ }
+ } else {
+ count++
+ }
+ }
+ return string(buffer), nil
+}
diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go
new file mode 100644
index 000000000..741bb530e
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/stringutils.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package goutils
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+// Typically returned by functions where a searched item cannot be found
+const INDEX_NOT_FOUND = -1
+
+/*
+Abbreviate abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "Now is the time for..."
+
+Specifically, the algorithm is as follows:
+
+ - If str is less than maxWidth characters long, return it.
+ - Else abbreviate it to (str[0:maxWidth - 3] + "...").
+ - If maxWidth is less than 4, return an illegal argument error.
+ - In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+ str - the string to check
+ maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+ string - abbreviated string
+ error - if the width is too small
+*/
+func Abbreviate(str string, maxWidth int) (string, error) {
+ return AbbreviateFull(str, 0, maxWidth)
+}
+
+/*
+AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..."
+This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not
+necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear
+somewhere in the result.
+In no case will it return a string of length greater than maxWidth.
+
+Parameters:
+ str - the string to check
+ offset - left edge of source string
+ maxWidth - maximum length of result string, must be at least 4
+
+Returns:
+ string - abbreviated string
+ error - if the width is too small
+*/
+func AbbreviateFull(str string, offset int, maxWidth int) (string, error) {
+ if str == "" {
+ return "", nil
+ }
+ if maxWidth < 4 {
+ err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4")
+ return "", err
+ }
+ if len(str) <= maxWidth {
+ return str, nil
+ }
+ if offset > len(str) {
+ offset = len(str)
+ }
+ if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7
+ offset = len(str) - (maxWidth - 3)
+ }
+ abrevMarker := "..."
+ if offset <= 4 {
+ return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker;
+ }
+ if maxWidth < 7 {
+ err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7")
+ return "", err
+ }
+ if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15
+ abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3))
+ return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3);
+ }
+ return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3));
+}
+
+/*
+DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune).
+It returns the string without whitespaces.
+
+Parameter:
+ str - the string to delete whitespace from, may be nil
+
+Returns:
+ the string without whitespaces
+*/
+func DeleteWhiteSpace(str string) string {
+ if str == "" {
+ return str
+ }
+ sz := len(str)
+ var chs bytes.Buffer
+ count := 0
+ for i := 0; i < sz; i++ {
+ ch := rune(str[i])
+ if !unicode.IsSpace(ch) {
+ chs.WriteRune(ch)
+ count++
+ }
+ }
+ if count == sz {
+ return str
+ }
+ return chs.String()
+}
+
+/*
+IndexOfDifference compares two strings, and returns the index at which the strings begin to differ.
+
+Parameters:
+ str1 - the first string
+ str2 - the second string
+
+Returns:
+ the index where str1 and str2 begin to differ; -1 if they are equal
+*/
+func IndexOfDifference(str1 string, str2 string) int {
+ if str1 == str2 {
+ return INDEX_NOT_FOUND
+ }
+ if IsEmpty(str1) || IsEmpty(str2) {
+ return 0
+ }
+ var i int
+ for i = 0; i < len(str1) && i < len(str2); i++ {
+ if rune(str1[i]) != rune(str2[i]) {
+ break
+ }
+ }
+ if i < len(str2) || i < len(str1) {
+ return i
+ }
+ return INDEX_NOT_FOUND
+}
+
+/*
+IsBlank checks if a string is whitespace or empty (""). Observe the following behavior:
+
+ goutils.IsBlank("") = true
+ goutils.IsBlank(" ") = true
+ goutils.IsBlank("bob") = false
+ goutils.IsBlank(" bob ") = false
+
+Parameter:
+ str - the string to check
+
+Returns:
+ true - if the string is whitespace or empty ("")
+*/
+func IsBlank(str string) bool {
+ strLen := len(str)
+ if str == "" || strLen == 0 {
+ return true
+ }
+ for i := 0; i < strLen; i++ {
+ if unicode.IsSpace(rune(str[i])) == false {
+ return false
+ }
+ }
+ return true
+}
+
+/*
+IndexOf returns the index of the first instance of sub in str, with the search beginning from the
+index start point specified. -1 is returned if sub is not present in str.
+
+An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero.
+A start position greater than the string length returns -1.
+
+Parameters:
+ str - the string to check
+ sub - the substring to find
+ start - the start position; negative treated as zero
+
+Returns:
+ the first index where the sub string was found (always >= start)
+*/
+func IndexOf(str string, sub string, start int) int {
+
+ if start < 0 {
+ start = 0
+ }
+
+ if len(str) < start {
+ return INDEX_NOT_FOUND
+ }
+
+ if IsEmpty(str) || IsEmpty(sub) {
+ return INDEX_NOT_FOUND
+ }
+
+ partialIndex := strings.Index(str[start:len(str)], sub)
+ if partialIndex == -1 {
+ return INDEX_NOT_FOUND
+ }
+ return partialIndex + start
+}
+
+// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise.
+func IsEmpty(str string) bool {
+ return len(str) == 0
+}
+
+// Returns either the passed in string, or if the string is empty, the value of defaultStr.
+func DefaultString(str string, defaultStr string) string {
+ if IsEmpty(str) {
+ return defaultStr
+ }
+ return str
+}
+
+// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr.
+func DefaultIfBlank(str string, defaultStr string) string {
+ if IsBlank(str) {
+ return defaultStr
+ }
+ return str
+}
diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go
new file mode 100644
index 000000000..034cad8e2
--- /dev/null
+++ b/vendor/github.com/Masterminds/goutils/wordutils.go
@@ -0,0 +1,357 @@
+/*
+Copyright 2014 Alexander Okoli
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package goutils provides utility functions to manipulate strings in various ways.
+The code snippets below show examples of how to use goutils. Some functions return
+errors while others do not, so usage would vary as a result.
+
+Example:
+
+ package main
+
+ import (
+ "fmt"
+ "github.com/aokoli/goutils"
+ )
+
+ func main() {
+
+ // EXAMPLE 1: A goutils function which returns no errors
+ fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF"
+
+
+
+ // EXAMPLE 2: A goutils function which returns an error
+ rand1, err1 := goutils.Random (-1, 0, 0, true, true)
+
+ if err1 != nil {
+ fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...)
+ } else {
+ fmt.Println(rand1)
+ }
+ }
+*/
+package goutils
+
+import (
+ "bytes"
+ "strings"
+ "unicode"
+)
+
+// VERSION indicates the current version of goutils
+const VERSION = "1.0.0"
+
+/*
+Wrap wraps a single line of text, identifying words by ' '.
+New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+ str - the string to be word wrapped
+ wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
+
+Returns:
+ a line with newlines inserted
+*/
+func Wrap(str string, wrapLength int) string {
+ return WrapCustom(str, wrapLength, "", false)
+}
+
+/*
+WrapCustom wraps a single line of text, identifying words by ' '.
+Leading spaces on a new line are stripped. Trailing spaces are not stripped.
+
+Parameters:
+ str - the string to be word wrapped
+ wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1
+ newLineStr - the string to insert for a new line, "" uses '\n'
+ wrapLongWords - true if long words (such as URLs) should be wrapped
+
+Returns:
+ a line with newlines inserted
+*/
+func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string {
+
+ if str == "" {
+ return ""
+ }
+ if newLineStr == "" {
+ newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons
+ }
+ if wrapLength < 1 {
+ wrapLength = 1
+ }
+
+ inputLineLength := len(str)
+ offset := 0
+
+ var wrappedLine bytes.Buffer
+
+ for inputLineLength-offset > wrapLength {
+
+ if rune(str[offset]) == ' ' {
+ offset++
+ continue
+ }
+
+ end := wrapLength + offset + 1
+ spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset
+
+ if spaceToWrapAt >= offset {
+ // normal word (not longer than wrapLength)
+ wrappedLine.WriteString(str[offset:spaceToWrapAt])
+ wrappedLine.WriteString(newLineStr)
+ offset = spaceToWrapAt + 1
+
+ } else {
+ // long word or URL
+ if wrapLongWords {
+ end := wrapLength + offset
+ // long words are wrapped one line at a time
+ wrappedLine.WriteString(str[offset:end])
+ wrappedLine.WriteString(newLineStr)
+ offset += wrapLength
+ } else {
+ // long words aren't wrapped, just extended beyond limit
+ end := wrapLength + offset
+ index := strings.IndexRune(str[end:len(str)], ' ')
+ if index == -1 {
+ wrappedLine.WriteString(str[offset:len(str)])
+ offset = inputLineLength
+ } else {
+ spaceToWrapAt = index + end
+ wrappedLine.WriteString(str[offset:spaceToWrapAt])
+ wrappedLine.WriteString(newLineStr)
+ offset = spaceToWrapAt + 1
+ }
+ }
+ }
+ }
+
+ wrappedLine.WriteString(str[offset:len(str)])
+
+ return wrappedLine.String()
+
+}
+
+/*
+Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed.
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune).
+The delimiters represent a set of characters understood to separate words. The first string character
+and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "".
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+ str - the string to capitalize
+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter
+
+Returns:
+ capitalized string
+*/
+func Capitalize(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+
+ buffer := []rune(str)
+ capitalizeNext := true
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if isDelimiter(ch, delimiters...) {
+ capitalizeNext = true
+ } else if capitalizeNext {
+ buffer[i] = unicode.ToTitle(ch)
+ capitalizeNext = false
+ }
+ }
+ return string(buffer)
+
+}
+
+/*
+CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a
+titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood
+to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized.
+Capitalization uses the Unicode title case, normally equivalent to upper case.
+
+Parameters:
+ str - the string to capitalize fully
+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter
+
+Returns:
+ capitalized string
+*/
+func CapitalizeFully(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+ str = strings.ToLower(str)
+ return Capitalize(str, delimiters...)
+}
+
+/*
+Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed.
+The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter
+character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+ str - the string to uncapitalize fully
+ delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter
+
+Returns:
+ uncapitalized string
+*/
+func Uncapitalize(str string, delimiters ...rune) string {
+
+ var delimLen int
+
+ if delimiters == nil {
+ delimLen = -1
+ } else {
+ delimLen = len(delimiters)
+ }
+
+ if str == "" || delimLen == 0 {
+ return str
+ }
+
+ buffer := []rune(str)
+ uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char.
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if isDelimiter(ch, delimiters...) {
+ uncapitalizeNext = true
+ } else if uncapitalizeNext {
+ buffer[i] = unicode.ToLower(ch)
+ uncapitalizeNext = false
+ }
+ }
+ return string(buffer)
+}
+
+/*
+SwapCase swaps the case of a string using a word based algorithm.
+
+Conversion algorithm:
+
+ Upper case character converts to Lower case
+ Title case character converts to Lower case
+ Lower case character after Whitespace or at start converts to Title case
+ Other Lower case character converts to Upper case
+ Whitespace is defined by unicode.IsSpace(char).
+
+Parameters:
+ str - the string to swap case
+
+Returns:
+ the changed string
+*/
+func SwapCase(str string) string {
+ if str == "" {
+ return str
+ }
+ buffer := []rune(str)
+
+ whitespace := true
+
+ for i := 0; i < len(buffer); i++ {
+ ch := buffer[i]
+ if unicode.IsUpper(ch) {
+ buffer[i] = unicode.ToLower(ch)
+ whitespace = false
+ } else if unicode.IsTitle(ch) {
+ buffer[i] = unicode.ToLower(ch)
+ whitespace = false
+ } else if unicode.IsLower(ch) {
+ if whitespace {
+ buffer[i] = unicode.ToTitle(ch)
+ whitespace = false
+ } else {
+ buffer[i] = unicode.ToUpper(ch)
+ }
+ } else {
+ whitespace = unicode.IsSpace(ch)
+ }
+ }
+ return string(buffer)
+}
+
+/*
+Initials extracts the initial letters from each word in the string. The first letter of the string and all first
+letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters
+parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string.
+
+Parameters:
+ str - the string to get initials from
+ delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter
+Returns:
+ string of initial letters
+*/
+func Initials(str string, delimiters ...rune) string {
+ if str == "" {
+ return str
+ }
+ if delimiters != nil && len(delimiters) == 0 {
+ return ""
+ }
+ strLen := len(str)
+ var buf bytes.Buffer
+ lastWasGap := true
+ for i := 0; i < strLen; i++ {
+ ch := rune(str[i])
+
+ if isDelimiter(ch, delimiters...) {
+ lastWasGap = true
+ } else if lastWasGap {
+ buf.WriteRune(ch)
+ lastWasGap = false
+ }
+ }
+ return buf.String()
+}
+
+// private function (lower case func name)
+func isDelimiter(ch rune, delimiters ...rune) bool {
+ if delimiters == nil {
+ return unicode.IsSpace(ch)
+ }
+ for _, delimiter := range delimiters {
+ if ch == delimiter {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore
new file mode 100644
index 000000000..6b061e617
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.gitignore
@@ -0,0 +1 @@
+_fuzz/
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
new file mode 100644
index 000000000..c87d1c4b9
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml
@@ -0,0 +1,30 @@
+run:
+ deadline: 2m
+
+linters:
+ disable-all: true
+ enable:
+ - misspell
+ - structcheck
+ - govet
+ - staticcheck
+ - deadcode
+ - errcheck
+ - varcheck
+ - unparam
+ - ineffassign
+ - nakedret
+ - gocyclo
+ - dupl
+ - goimports
+ - revive
+ - gosec
+ - gosimple
+ - typecheck
+ - unused
+
+linters-settings:
+ gofmt:
+ simplify: true
+ dupl:
+ threshold: 600
diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
new file mode 100644
index 000000000..f12626423
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md
@@ -0,0 +1,214 @@
+# Changelog
+
+## 3.2.0 (2022-11-28)
+
+### Added
+
+- #190: Added text marshaling and unmarshaling
+- #167: Added JSON marshalling for constraints (thanks @SimonTheLeg)
+- #173: Implement encoding.TextMarshaler and encoding.TextUnmarshaler on Version (thanks @MarkRosemaker)
+- #179: Added New() version constructor (thanks @kazhuravlev)
+
+### Changed
+
+- #182/#183: Updated CI testing setup
+
+### Fixed
+
+- #186: Fixing issue where validation of constraint section gave false positives
+- #176: Fix constraints check with *-0 (thanks @mtt0)
+- #181: Fixed Caret operator (^) gives unexpected results when the minor version in constraint is 0 (thanks @arshchimni)
+- #161: Fixed godoc (thanks @afirth)
+
+## 3.1.1 (2020-11-23)
+
+### Fixed
+
+- #158: Fixed issue with generated regex operation order that could cause problem
+
+## 3.1.0 (2020-04-15)
+
+### Added
+
+- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah)
+
+### Changed
+
+- #148: More accurate validation messages on constraints
+
+## 3.0.3 (2019-12-13)
+
+### Fixed
+
+- #141: Fixed issue with <= comparison
+
+## 3.0.2 (2019-11-14)
+
+### Fixed
+
+- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos)
+
+## 3.0.1 (2019-09-13)
+
+### Fixed
+
+- #125: Fixes issue with module path for v3
+
+## 3.0.0 (2019-09-12)
+
+This is a major release of the semver package which includes API changes. The Go
+API is compatible with ^1. The Go API was not changed because many people are using
+`go get` without Go modules for their applications and API breaking changes cause
+errors which we have or would need to support.
+
+The changes in this release are the handling based on the data passed into the
+functions. These are described in the added and changed sections below.
+
+### Added
+
+- StrictNewVersion function. This is similar to NewVersion but will return an
+ error if the version passed in is not a strict semantic version. For example,
+ 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly
+ speaking semantic versions. This function is faster, performs fewer operations,
+ and uses fewer allocations than NewVersion.
+- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint.
+ The Makefile contains the operations used. For more information on you can start
+ on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing
+- Now using Go modules
+
+### Changed
+
+- NewVersion has proper prerelease and metadata validation with error messages
+ to signal an issue with either of them
+- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. If the
+ version is >=1 the ^ ranges works the same as v1. For major versions of 0 the
+ rules have changed. The minor version is treated as the stable version unless
+ a patch is specified and then it is equivalent to =. One difference from npm/js
+ is that prereleases there are only to a specific version (e.g. 1.2.3).
+ Prereleases here look over multiple versions and follow semantic version
+ ordering rules. This pattern now follows along with the expected and requested
+ handling of this packaged by numerous users.
+
+## 1.5.0 (2019-09-11)
+
+### Added
+
+- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
+
+### Changed
+
+- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
+- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
+- #72: Adding docs comment pointing to vert for a cli
+- #71: Update the docs on pre-release comparator handling
+- #89: Test with new go versions (thanks @thedevsaddam)
+- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
+
+### Fixed
+
+- #78: Fix unchecked error in example code (thanks @ravron)
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+- #97: Fixed copyright file for proper display on GitHub
+- #107: Fix handling prerelease when sorting alphanum and num
+- #109: Fixed where Validate sometimes returns wrong message on error
+
+## 1.4.2 (2018-04-10)
+
+### Changed
+
+- #72: Updated the docs to point to vert for a console appliaction
+- #71: Update the docs on pre-release comparator handling
+
+### Fixed
+
+- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
+
+## 1.4.1 (2018-04-02)
+
+### Fixed
+
+- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
+
+## 1.4.0 (2017-10-04)
+
+### Changed
+
+- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
+
+## 1.3.1 (2017-07-10)
+
+### Fixed
+
+- Fixed #57: number comparisons in prerelease sometimes inaccurate
+
+## 1.3.0 (2017-05-02)
+
+### Added
+
+- #45: Added json (un)marshaling support (thanks @mh-cbon)
+- Stability marker. See https://masterminds.github.io/stability/
+
+### Fixed
+
+- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
+
+### Changed
+
+- #55: The godoc icon moved from png to svg
+
+## 1.2.3 (2017-04-03)
+
+### Fixed
+
+- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
+
+## Release 1.2.2 (2016-12-13)
+
+### Fixed
+
+- #34: Fixed issue where hyphen range was not working with pre-release parsing.
+
+## Release 1.2.1 (2016-11-28)
+
+### Fixed
+
+- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
+ properly.
+
+## Release 1.2.0 (2016-11-04)
+
+### Added
+
+- #20: Added MustParse function for versions (thanks @adamreese)
+- #15: Added increment methods on versions (thanks @mh-cbon)
+
+### Fixed
+
+- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
+ might not satisfy the intended compatibility. The change here ignores pre-releases
+ on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
+ constraint. For example, `^1.2.3` will ignore pre-releases while
+ `^1.2.3-alpha` will include them.
+
+## Release 1.1.1 (2016-06-30)
+
+### Changed
+
+- Issue #9: Speed up version comparison performance (thanks @sdboyer)
+- Issue #8: Added benchmarks (thanks @sdboyer)
+- Updated Go Report Card URL to new location
+- Updated Readme to add code snippet formatting (thanks @mh-cbon)
+- Updating tagging to v[SemVer] structure for compatibility with other tools.
+
+## Release 1.1.0 (2016-03-11)
+
+- Issue #2: Implemented validation to provide reasons a versions failed a
+ constraint.
+
+## Release 1.0.1 (2015-12-31)
+
+- Fixed #1: * constraint failing on valid versions.
+
+## Release 1.0.0 (2015-10-20)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
new file mode 100644
index 000000000..9ff7da9c4
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2014-2019, Matt Butcher and Matt Farina
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile
new file mode 100644
index 000000000..eac19178f
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/Makefile
@@ -0,0 +1,37 @@
+GOPATH=$(shell go env GOPATH)
+GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint
+GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build
+GOFUZZ = $(GOPATH)/bin/go-fuzz
+
+.PHONY: lint
+lint: $(GOLANGCI_LINT)
+ @echo "==> Linting codebase"
+ @$(GOLANGCI_LINT) run
+
+.PHONY: test
+test:
+ @echo "==> Running tests"
+ GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+ @echo "==> Running Tests with coverage"
+ GO111MODULE=on go test -cover .
+
+.PHONY: fuzz
+fuzz: $(GOFUZZBUILD) $(GOFUZZ)
+ @echo "==> Fuzz testing"
+ $(GOFUZZBUILD)
+ $(GOFUZZ) -workdir=_fuzz
+
+$(GOLANGCI_LINT):
+ # Install golangci-lint. The configuration for it is in the .golangci.yml
+ # file in the root of the repository
+ echo ${GOPATH}
+ curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1
+
+$(GOFUZZBUILD):
+ cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
+
+$(GOFUZZ):
+ cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep
\ No newline at end of file
diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md
new file mode 100644
index 000000000..d8f54dcbd
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/README.md
@@ -0,0 +1,244 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions)
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Package Versions
+
+There are three major versions of the `semver` package.
+
+* 3.x.x is the new stable and active version. This version is focused on constraint
+ compatibility for range handling in other tools from other languages. It has
+ a similar API to the v1 releases. The development of this version is on the master
+ branch. The documentation for this version is below.
+* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are
+ no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer).
+ There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x).
+* 1.x.x is the most widely used version with numerous tagged releases. This is the
+ previous stable and is still maintained for bug fixes. The development, to fix
+ bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md).
+
+## Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an error is returned if there is an issue parsing the
+version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+build345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. Getting the original string is useful if the semantic version was coerced
+into a valid form.
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+```go
+raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+vs := make([]*semver.Version, len(raw))
+for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+}
+
+sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other uses `Constraints`. There are some important
+differences to note between these two methods of comparison.
+
+1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include prereleases
+ within the comparison. It will provide an answer that is valid with the
+ comparison section of the spec at https://semver.org/#spec-item-11
+2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering prereleases to be invalid if the
+ ranges does not include one. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+3. Constraint ranges can have some complex rules including the shorthand use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods of checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns while PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patterns with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+c, err := semver.NewConstraint(">= 1.2.3")
+if err != nil {
+ // Handle constraint not being parsable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parsable.
+}
+// Check if the version meets the constraints. The a variable will be true.
+a := c.Check(v)
+```
+
+### Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of space or comma separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+### Working With Prerelease Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of prereleases include
+development, alpha, beta, and release candidate releases. A prerelease may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, prereleases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification prereleases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons using constraints without a prerelease comparator will skip
+prerelease versions. For example, `>=1.2.3` will skip prereleases when looking
+at a list of releases while `>=1.2.3-0` will evaluate and find prereleases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order
+(see an [ASCII Table](http://www.asciitable.com/))
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+### Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+### Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+### Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
+
+### Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts
+as the API stability level. This is useful when comparisons of API versions as a
+major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+* `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+* `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+* `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+## Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+```go
+c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+if err != nil {
+ // Handle constraint not being parseable.
+}
+
+v, err := semver.NewVersion("1.3")
+if err != nil {
+ // Handle version not being parseable.
+}
+
+// Validate a version against a constraint.
+a, msgs := c.Validate(v)
+// a is false
+for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+}
+```
+
+## Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go
new file mode 100644
index 000000000..a78235895
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection. The number of Version instances
+// on the slice.
+func (c Collection) Len() int {
+ return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. If checks if one is less than the other.
+func (c Collection) Less(i, j int) bool {
+ return c[i].LessThan(c[j])
+}
+
+// Swap is needed for the sort interface to replace the Version objects
+// at two different positions in the slice.
+func (c Collection) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go
new file mode 100644
index 000000000..203072e46
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/constraints.go
@@ -0,0 +1,594 @@
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Constraints is one or more constraint that a semantic version can be
+// checked against.
+type Constraints struct {
+ constraints [][]*constraint
+}
+
+// NewConstraint returns a Constraints instance that a Version instance can
+// be checked against. If there is a parse error it will be returned.
+func NewConstraint(c string) (*Constraints, error) {
+
+ // Rewrite - ranges into a comparison operation.
+ c = rewriteRange(c)
+
+ ors := strings.Split(c, "||")
+ or := make([][]*constraint, len(ors))
+ for k, v := range ors {
+
+ // TODO: Find a way to validate and fetch all the constraints in a simpler form
+
+ // Validate the segment
+ if !validConstraintRegex.MatchString(v) {
+ return nil, fmt.Errorf("improper constraint: %s", v)
+ }
+
+ cs := findConstraintRegex.FindAllString(v, -1)
+ if cs == nil {
+ cs = append(cs, v)
+ }
+ result := make([]*constraint, len(cs))
+ for i, s := range cs {
+ pc, err := parseConstraint(s)
+ if err != nil {
+ return nil, err
+ }
+
+ result[i] = pc
+ }
+ or[k] = result
+ }
+
+ o := &Constraints{constraints: or}
+ return o, nil
+}
+
+// Check tests if a version satisfies the constraints.
+func (cs Constraints) Check(v *Version) bool {
+ // TODO(mattfarina): For v4 of this library consolidate the Check and Validate
+ // functions as the underlying functions make that possible now.
+ // loop over the ORs and check the inner ANDs
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ if check, _ := c.check(v); !check {
+ joy = false
+ break
+ }
+ }
+
+ if joy {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Validate checks if a version satisfies a constraint. If not a slice of
+// reasons for the failure are returned in addition to a bool.
+func (cs Constraints) Validate(v *Version) (bool, []error) {
+ // loop over the ORs and check the inner ANDs
+ var e []error
+
+ // Capture the prerelease message only once. When it happens the first time
+ // this var is marked
+ var prerelesase bool
+ for _, o := range cs.constraints {
+ joy := true
+ for _, c := range o {
+ // Before running the check handle the case there the version is
+ // a prerelease and the check is not searching for prereleases.
+ if c.con.pre == "" && v.pre != "" {
+ if !prerelesase {
+ em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ e = append(e, em)
+ prerelesase = true
+ }
+ joy = false
+
+ } else {
+
+ if _, err := c.check(v); err != nil {
+ e = append(e, err)
+ joy = false
+ }
+ }
+ }
+
+ if joy {
+ return true, []error{}
+ }
+ }
+
+ return false, e
+}
+
+func (cs Constraints) String() string {
+ buf := make([]string, len(cs.constraints))
+ var tmp bytes.Buffer
+
+ for k, v := range cs.constraints {
+ tmp.Reset()
+ vlen := len(v)
+ for kk, c := range v {
+ tmp.WriteString(c.string())
+
+ // Space separate the AND conditions
+ if vlen > 1 && kk < vlen-1 {
+ tmp.WriteString(" ")
+ }
+ }
+ buf[k] = tmp.String()
+ }
+
+ return strings.Join(buf, " || ")
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (cs *Constraints) UnmarshalText(text []byte) error {
+ temp, err := NewConstraint(string(text))
+ if err != nil {
+ return err
+ }
+
+ *cs = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (cs Constraints) MarshalText() ([]byte, error) {
+ return []byte(cs.String()), nil
+}
+
+var constraintOps map[string]cfunc
+var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+// Used to find individual constraints within a multi-constraint string
+var findConstraintRegex *regexp.Regexp
+
+// Used to validate a segment of ANDs is valid
+var validConstraintRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+func init() {
+ constraintOps = map[string]cfunc{
+ "": constraintTildeOrEqual,
+ "=": constraintTildeOrEqual,
+ "!=": constraintNotEqual,
+ ">": constraintGreaterThan,
+ "<": constraintLessThan,
+ ">=": constraintGreaterThanEqual,
+ "=>": constraintGreaterThanEqual,
+ "<=": constraintLessThanEqual,
+ "=<": constraintLessThanEqual,
+ "~": constraintTilde,
+ "~>": constraintTilde,
+ "^": constraintCaret,
+ }
+
+ ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^`
+
+ constraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^\s*(%s)\s*(%s)\s*$`,
+ ops,
+ cvRegex))
+
+ constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
+ `\s*(%s)\s+-\s+(%s)\s*`,
+ cvRegex, cvRegex))
+
+ findConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `(%s)\s*(%s)`,
+ ops,
+ cvRegex))
+
+ // The first time a constraint shows up will look slightly different from
+ // future times it shows up due to a leading space or comma in a given
+ // string.
+ validConstraintRegex = regexp.MustCompile(fmt.Sprintf(
+ `^(\s*(%s)\s*(%s)\s*)((?:\s+|,\s*)(%s)\s*(%s)\s*)*$`,
+ ops,
+ cvRegex,
+ ops,
+ cvRegex))
+}
+
+// An individual constraint
+type constraint struct {
+ // The version used in the constraint check. For example, if a constraint
+ // is '<= 2.0.0' the con a version instance representing 2.0.0.
+ con *Version
+
+ // The original parsed version (e.g., 4.x from != 4.x)
+ orig string
+
+ // The original operator for the constraint
+ origfunc string
+
+ // When an x is used as part of the version (e.g., 1.x)
+ minorDirty bool
+ dirty bool
+ patchDirty bool
+}
+
+// Check if a version meets the constraint
+func (c *constraint) check(v *Version) (bool, error) {
+ return constraintOps[c.origfunc](v, c)
+}
+
+// String prints an individual constraint into a string
+func (c *constraint) string() string {
+ return c.origfunc + c.orig
+}
+
+type cfunc func(v *Version, c *constraint) (bool, error)
+
+func parseConstraint(c string) (*constraint, error) {
+ if len(c) > 0 {
+ m := constraintRegex.FindStringSubmatch(c)
+ if m == nil {
+ return nil, fmt.Errorf("improper constraint: %s", c)
+ }
+
+ cs := &constraint{
+ orig: m[2],
+ origfunc: m[1],
+ }
+
+ ver := m[2]
+ minorDirty := false
+ patchDirty := false
+ dirty := false
+ if isX(m[3]) || m[3] == "" {
+ ver = fmt.Sprintf("0.0.0%s", m[6])
+ dirty = true
+ } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
+ minorDirty = true
+ dirty = true
+ ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+ } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" {
+ dirty = true
+ patchDirty = true
+ ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+ }
+
+ con, err := NewVersion(ver)
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint Parser Error")
+ }
+
+ cs.con = con
+ cs.minorDirty = minorDirty
+ cs.patchDirty = patchDirty
+ cs.dirty = dirty
+
+ return cs, nil
+ }
+
+ // The rest is the special case where an empty string was passed in which
+ // is equivalent to * or >=0.0.0
+ con, err := StrictNewVersion("0.0.0")
+ if err != nil {
+
+ // The constraintRegex should catch any regex parsing errors. So,
+ // we should never get here.
+ return nil, errors.New("constraint Parser Error")
+ }
+
+ cs := &constraint{
+ con: con,
+ orig: c,
+ origfunc: "",
+ minorDirty: false,
+ patchDirty: false,
+ dirty: true,
+ }
+ return cs, nil
+}
+
+// Constraint functions
+func constraintNotEqual(v *Version, c *constraint) (bool, error) {
+ if c.dirty {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.con.Major() != v.Major() {
+ return true, nil
+ }
+ if c.con.Minor() != v.Minor() && !c.minorDirty {
+ return true, nil
+ } else if c.minorDirty {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ } else if c.con.Patch() != v.Patch() && !c.patchDirty {
+ return true, nil
+ } else if c.patchDirty {
+ // Need to handle prereleases if present
+ if v.Prerelease() != "" || c.con.Prerelease() != "" {
+ eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return false, fmt.Errorf("%s is equal to %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+func constraintGreaterThan(v *Version, c *constraint) (bool, error) {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return true, nil
+ } else if v.Major() < c.con.Major() {
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.minorDirty {
+ // This is a range case such as >11. When the version is something like
+ // 11.1.0 is it not > 11. For that we would need 12 or higher
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ } else if c.patchDirty {
+ // This is for ranges such as >11.1. A version of 11.1.1 is not greater
+ // which one of 11.2.1 is greater
+ eq = v.Minor() > c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+ }
+
+ // If we have gotten here we are not comparing pre-preleases and can use the
+ // Compare function to accomplish that.
+ eq = v.Compare(c.con) == 1
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig)
+}
+
+func constraintLessThan(v *Version, c *constraint) (bool, error) {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) < 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig)
+}
+
+func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) {
+
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ eq := v.Compare(c.con) >= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+}
+
+func constraintLessThanEqual(v *Version, c *constraint) (bool, error) {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ var eq bool
+
+ if !c.dirty {
+ eq = v.Compare(c.con) <= 0
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ if v.Major() > c.con.Major() {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s is greater than %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// ~*, ~>* --> >= 0.0.0 (any)
+// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
+// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
+// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
+// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
+// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
+func constraintTilde(v *Version, c *constraint) (bool, error) {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+ // ~0.0.0 is a special case where all constraints are accepted. It's
+ // equivalent to >= 0.0.0.
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
+ !c.minorDirty && !c.patchDirty {
+ return true, nil
+ }
+
+ if v.Major() != c.con.Major() {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ if v.Minor() != c.con.Minor() && !c.minorDirty {
+ return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig)
+ }
+
+ return true, nil
+}
+
+// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
+// it's a straight =
+func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ if c.dirty {
+ return constraintTilde(v, c)
+ }
+
+ eq := v.Equal(c.con)
+ if eq {
+ return true, nil
+ }
+
+ return false, fmt.Errorf("%s is not equal to %s", v, c.orig)
+}
+
+// ^* --> (any)
+// ^1.2.3 --> >=1.2.3 <2.0.0
+// ^1.2 --> >=1.2.0 <2.0.0
+// ^1 --> >=1.0.0 <2.0.0
+// ^0.2.3 --> >=0.2.3 <0.3.0
+// ^0.2 --> >=0.2.0 <0.3.0
+// ^0.0.3 --> >=0.0.3 <0.0.4
+// ^0.0 --> >=0.0.0 <0.1.0
+// ^0 --> >=0.0.0 <1.0.0
+func constraintCaret(v *Version, c *constraint) (bool, error) {
+ // If there is a pre-release on the version but the constraint isn't looking
+ // for them assume that pre-releases are not compatible. See issue 21 for
+ // more details.
+ if v.Prerelease() != "" && c.con.Prerelease() == "" {
+ return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
+ }
+
+ // This less than handles prereleases
+ if v.LessThan(c.con) {
+ return false, fmt.Errorf("%s is less than %s", v, c.orig)
+ }
+
+ var eq bool
+
+ // ^ when the major > 0 is >=x.y.z < x+1
+ if c.con.Major() > 0 || c.minorDirty {
+
+ // ^ has to be within a major range for > 0. Everything less than was
+ // filtered out with the LessThan call above. This filters out those
+ // that greater but not within the same major range.
+ eq = v.Major() == c.con.Major()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+
+ // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1
+ if c.con.Major() == 0 && v.Major() > 0 {
+ return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig)
+ }
+ // If the con Minor is > 0 it is not dirty
+ if c.con.Minor() > 0 || c.patchDirty {
+ eq = v.Minor() == c.con.Minor()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig)
+ }
+ // ^ when the minor is 0 and minor > 0 is =0.0.z
+ if c.con.Minor() == 0 && v.Minor() > 0 {
+ return false, fmt.Errorf("%s does not have same minor version as %s", v, c.orig)
+ }
+
+ // At this point the major is 0 and the minor is 0 and not dirty. The patch
+ // is not dirty so we need to check if they are equal. If they are not equal
+ eq = c.con.Patch() == v.Patch()
+ if eq {
+ return true, nil
+ }
+ return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig)
+}
+
+func isX(x string) bool {
+ switch x {
+ case "x", "*", "X":
+ return true
+ default:
+ return false
+ }
+}
+
+func rewriteRange(i string) string {
+ m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+ if m == nil {
+ return i
+ }
+ o := i
+ for _, v := range m {
+ t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+ o = strings.Replace(o, v[0], t, 1)
+ }
+
+ return o
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go
new file mode 100644
index 000000000..74f97caa5
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/doc.go
@@ -0,0 +1,184 @@
+/*
+Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
+
+Specifically it provides the ability to:
+
+ - Parse semantic versions
+ - Sort semantic versions
+ - Check if a semantic version fits within a set of constraints
+ - Optionally work with a `v` prefix
+
+# Parsing Semantic Versions
+
+There are two functions that can parse semantic versions. The `StrictNewVersion`
+function only parses valid version 2 semantic versions as outlined in the
+specification. The `NewVersion` function attempts to coerce a version into a
+semantic version and parse it. For example, if there is a leading v or a version
+listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid
+semantic version (e.g., 1.2.0). In both cases a `Version` object is returned
+that can be sorted, compared, and used in constraints.
+
+When parsing a version an optional error can be returned if there is an issue
+parsing the version. For example,
+
+ v, err := semver.NewVersion("1.2.3-beta.1+b345")
+
+The version object has methods to get the parts of the version, compare it to
+other versions, convert the version back into a string, and get the original
+string. For more details please see the documentation
+at https://godoc.org/github.com/Masterminds/semver.
+
+# Sorting Semantic Versions
+
+A set of versions can be sorted using the `sort` package from the standard library.
+For example,
+
+ raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
+ vs := make([]*semver.Version, len(raw))
+ for i, r := range raw {
+ v, err := semver.NewVersion(r)
+ if err != nil {
+ t.Errorf("Error parsing version: %s", err)
+ }
+
+ vs[i] = v
+ }
+
+ sort.Sort(semver.Collection(vs))
+
+# Checking Version Constraints and Comparing Versions
+
+There are two methods for comparing versions. One uses comparison methods on
+`Version` instances and the other is using Constraints. There are some important
+differences to notes between these two methods of comparison.
+
+ 1. When two versions are compared using functions such as `Compare`, `LessThan`,
+ and others it will follow the specification and always include prereleases
+ within the comparison. It will provide an answer valid with the comparison
+ spec section at https://semver.org/#spec-item-11
+ 2. When constraint checking is used for checks or validation it will follow a
+ different set of rules that are common for ranges with tools like npm/js
+ and Rust/Cargo. This includes considering prereleases to be invalid if the
+ ranges does not include on. If you want to have it include pre-releases a
+ simple solution is to include `-0` in your range.
+ 3. Constraint ranges can have some complex rules including the shorthard use of
+ ~ and ^. For more details on those see the options below.
+
+There are differences between the two methods or checking versions because the
+comparison methods on `Version` follow the specification while comparison ranges
+are not part of the specification. Different packages and tools have taken it
+upon themselves to come up with range rules. This has resulted in differences.
+For example, npm/js and Cargo/Rust follow similar patterns which PHP has a
+different pattern for ^. The comparison features in this package follow the
+npm/js and Cargo/Rust lead because applications using it have followed similar
+patters with their versions.
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+ c, err := semver.NewConstraint(">= 1.2.3")
+ if err != nil {
+ // Handle constraint not being parsable.
+ }
+
+ v, err := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parsable.
+ }
+ // Check if the version meets the constraints. The a variable will be true.
+ a := c.Check(v)
+
+# Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma or space separated AND comparisons. These are then separated by || (OR)
+comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
+greater than or equal to 4.2.3. This can also be written as
+`">= 1.2, < 3.0.0 || >= 4.2.3"`
+
+The basic comparisons are:
+
+ - `=`: equal (aliased to no operator)
+ - `!=`: not equal
+ - `>`: greater than
+ - `<`: less than
+ - `>=`: greater than or equal to
+ - `<=`: less than or equal to
+
+# Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphens ranges.
+These look like:
+
+ - `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ - `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5`
+
+# Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the tilde operation. For example,
+
+ - `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `>= 1.2.x` is equivalent to `>= 1.2.0`
+ - `<= 2.x` is equivalent to `<= 3`
+ - `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ - `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0`
+ - `~1` is equivalent to `>= 1, < 2`
+ - `~2.3` is equivalent to `>= 2.3 < 2.4`
+ - `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0`
+ - `~1.x` is equivalent to `>= 1 < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes once a stable
+(1.0.0) release has occurred. Prior to a 1.0.0 release the minor versions acts
+as the API stability level. This is useful when comparisons of API versions as a
+major change is API breaking. For example,
+
+ - `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ - `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ - `^2.3` is equivalent to `>= 2.3, < 3`
+ - `^2.x` is equivalent to `>= 2.0.0, < 3`
+ - `^0.2.3` is equivalent to `>=0.2.3 <0.3.0`
+ - `^0.2` is equivalent to `>=0.2.0 <0.3.0`
+ - `^0.0.3` is equivalent to `>=0.0.3 <0.0.4`
+ - `^0.0` is equivalent to `>=0.0.0 <0.1.0`
+ - `^0` is equivalent to `>=0.0.0 <1.0.0`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails a slice of errors containing why a
+version didn't meet the constraint is returned. For example,
+
+ c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+ if err != nil {
+ // Handle constraint not being parseable.
+ }
+
+ v, _ := semver.NewVersion("1.3")
+ if err != nil {
+ // Handle version not being parseable.
+ }
+
+ // Validate a version against a constraint.
+ a, msgs := c.Validate(v)
+ // a is false
+ for _, m := range msgs {
+ fmt.Println(m)
+
+ // Loops over the errors which would read
+ // "1.3 is greater than 1.2.3"
+ // "1.3 is less than 1.4"
+ }
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go
new file mode 100644
index 000000000..a242ad705
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go
@@ -0,0 +1,22 @@
+// +build gofuzz
+
+package semver
+
+func Fuzz(data []byte) int {
+ d := string(data)
+
+ // Test NewVersion
+ _, _ = NewVersion(d)
+
+ // Test StrictNewVersion
+ _, _ = StrictNewVersion(d)
+
+ // Test NewConstraint
+ _, _ = NewConstraint(d)
+
+ // The return value should be 0 normally, 1 if the priority in future tests
+ // should be increased, and -1 if future tests should skip passing in that
+ // data. We do not have a reason to change priority so 0 is always returned.
+ // There are example tests that do this.
+ return 0
+}
diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go
new file mode 100644
index 000000000..7c4bed334
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/v3/version.go
@@ -0,0 +1,639 @@
+package semver
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+
+var (
+ // ErrInvalidSemVer is returned a version is found to be invalid when
+ // being parsed.
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version")
+
+ // ErrEmptyString is returned when an empty string is passed in for parsing.
+ ErrEmptyString = errors.New("Version string empty")
+
+ // ErrInvalidCharacters is returned when invalid characters are found as
+ // part of a version
+ ErrInvalidCharacters = errors.New("Invalid characters in version")
+
+ // ErrSegmentStartsZero is returned when a version segment starts with 0.
+ // This is invalid in SemVer.
+ ErrSegmentStartsZero = errors.New("Version segment starts with 0")
+
+ // ErrInvalidMetadata is returned when the metadata is an invalid format
+ ErrInvalidMetadata = errors.New("Invalid Metadata string")
+
+ // ErrInvalidPrerelease is returned when the pre-release is an invalid format
+ ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
+)
+
+// semVerRegex is the regular expression used to parse a semantic version.
+const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+ `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+ `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
+
+// Version represents a single semantic version.
+type Version struct {
+ major, minor, patch uint64
+ pre string
+ metadata string
+ original string
+}
+
+func init() {
+ versionRegex = regexp.MustCompile("^" + semVerRegex + "$")
+}
+
+const (
+ num string = "0123456789"
+ allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num
+)
+
+// StrictNewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. Only parses valid semantic versions.
+// Performs checking that can find errors within the version.
+// If you want to coerce a version such as 1 or 1.2 and parse it as the 1.x
+// releases of semver did, use the NewVersion() function.
+func StrictNewVersion(v string) (*Version, error) {
+ // Parsing here does not use RegEx in order to increase performance and reduce
+ // allocations.
+
+ if len(v) == 0 {
+ return nil, ErrEmptyString
+ }
+
+ // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build
+ parts := strings.SplitN(v, ".", 3)
+ if len(parts) != 3 {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ original: v,
+ }
+
+ // check for prerelease or build metadata
+ var extra []string
+ if strings.ContainsAny(parts[2], "-+") {
+ // Start with the build metadata first as it needs to be on the right
+ extra = strings.SplitN(parts[2], "+", 2)
+ if len(extra) > 1 {
+ // build metadata found
+ sv.metadata = extra[1]
+ parts[2] = extra[0]
+ }
+
+ extra = strings.SplitN(parts[2], "-", 2)
+ if len(extra) > 1 {
+ // prerelease found
+ sv.pre = extra[1]
+ parts[2] = extra[0]
+ }
+ }
+
+ // Validate the number segments are valid. This includes only having positive
+ // numbers and no leading 0's.
+ for _, p := range parts {
+ if !containsOnly(p, num) {
+ return nil, ErrInvalidCharacters
+ }
+
+ if len(p) > 1 && p[0] == '0' {
+ return nil, ErrSegmentStartsZero
+ }
+ }
+
+ // Extract the major, minor, and patch elements onto the returned Version
+ var err error
+ sv.major, err = strconv.ParseUint(parts[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.minor, err = strconv.ParseUint(parts[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ sv.patch, err = strconv.ParseUint(parts[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // No prerelease or build metadata found so returning now as a fastpath.
+ if sv.pre == "" && sv.metadata == "" {
+ return sv, nil
+ }
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+// NewVersion parses a given version and returns an instance of Version or
+// an error if unable to parse the version. If the version is SemVer-ish it
+// attempts to convert it to SemVer. If you want to validate it was a strict
+// semantic version at parse time see StrictNewVersion().
+func NewVersion(v string) (*Version, error) {
+ m := versionRegex.FindStringSubmatch(v)
+ if m == nil {
+ return nil, ErrInvalidSemVer
+ }
+
+ sv := &Version{
+ metadata: m[8],
+ pre: m[5],
+ original: v,
+ }
+
+ var err error
+ sv.major, err = strconv.ParseUint(m[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+
+ if m[2] != "" {
+ sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ } else {
+ sv.minor = 0
+ }
+
+ if m[3] != "" {
+ sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("Error parsing version segment: %s", err)
+ }
+ } else {
+ sv.patch = 0
+ }
+
+ // Perform some basic due diligence on the extra parts to ensure they are
+ // valid.
+
+ if sv.pre != "" {
+ if err = validatePrerelease(sv.pre); err != nil {
+ return nil, err
+ }
+ }
+
+ if sv.metadata != "" {
+ if err = validateMetadata(sv.metadata); err != nil {
+ return nil, err
+ }
+ }
+
+ return sv, nil
+}
+
+// New creates a new instance of Version with each of the parts passed in as
+// arguments instead of parsing a version string.
+func New(major, minor, patch uint64, pre, metadata string) *Version {
+ v := Version{
+ major: major,
+ minor: minor,
+ patch: patch,
+ pre: pre,
+ metadata: metadata,
+ original: "",
+ }
+
+ v.original = v.String()
+
+ return &v
+}
+
+// MustParse parses a given version and panics on error.
+func MustParse(v string) *Version {
+ sv, err := NewVersion(v)
+ if err != nil {
+ panic(err)
+ }
+ return sv
+}
+
+// String converts a Version object to a string.
+// Note, if the original version contained a leading v this version will not.
+// See the Original() method to retrieve the original value. Semantic Versions
+// don't contain a leading v per the spec. Instead it's optional on
+// implementation.
+func (v Version) String() string {
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
+ if v.pre != "" {
+ fmt.Fprintf(&buf, "-%s", v.pre)
+ }
+ if v.metadata != "" {
+ fmt.Fprintf(&buf, "+%s", v.metadata)
+ }
+
+ return buf.String()
+}
+
+// Original returns the original value passed in to be parsed.
+func (v *Version) Original() string {
+ return v.original
+}
+
+// Major returns the major version.
+func (v Version) Major() uint64 {
+ return v.major
+}
+
+// Minor returns the minor version.
+func (v Version) Minor() uint64 {
+ return v.minor
+}
+
+// Patch returns the patch version.
+func (v Version) Patch() uint64 {
+ return v.patch
+}
+
+// Prerelease returns the pre-release version.
+func (v Version) Prerelease() string {
+ return v.pre
+}
+
+// Metadata returns the metadata on the version.
+func (v Version) Metadata() string {
+ return v.metadata
+}
+
+// originalVPrefix returns the original 'v' prefix if any.
+func (v Version) originalVPrefix() string {
+ // Note, only lowercase v is supported as a prefix by the parser.
+ if v.original != "" && v.original[:1] == "v" {
+ return v.original[:1]
+ }
+ return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version does not have prerelease/metadata information,
+// it unsets metadata and prerelease values, increments patch number.
+// If the current version has any of prerelease or metadata information,
+// it unsets both values and keeps current patch value
+func (v Version) IncPatch() Version {
+ vNext := v
+ // according to http://semver.org/#spec-item-9
+ // Pre-release versions have a lower precedence than the associated normal version.
+ // according to http://semver.org/#spec-item-10
+ // Build metadata SHOULD be ignored when determining version precedence.
+ if v.pre != "" {
+ vNext.metadata = ""
+ vNext.pre = ""
+ } else {
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = v.patch + 1
+ }
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = v.minor + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+ vNext := v
+ vNext.metadata = ""
+ vNext.pre = ""
+ vNext.patch = 0
+ vNext.minor = 0
+ vNext.major = v.major + 1
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+ vNext := v
+ if len(prerelease) > 0 {
+ if err := validatePrerelease(prerelease); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.pre = prerelease
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+ vNext := v
+ if len(metadata) > 0 {
+ if err := validateMetadata(metadata); err != nil {
+ return vNext, err
+ }
+ }
+ vNext.metadata = metadata
+ vNext.original = v.originalVPrefix() + "" + vNext.String()
+ return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+ return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+ return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+ return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease. Compare always takes into account
+// prereleases. If you want to work with ranges using typical range syntaxes that
+// skip prereleases if the range is not looking for them use constraints.
+func (v *Version) Compare(o *Version) int {
+ // Compare the major, minor, and patch version for differences. If a
+ // difference is found return the comparison.
+ if d := compareSegment(v.Major(), o.Major()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+ return d
+ }
+ if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+ return d
+ }
+
+ // At this point the major, minor, and patch versions are the same.
+ ps := v.pre
+ po := o.Prerelease()
+
+ if ps == "" && po == "" {
+ return 0
+ }
+ if ps == "" {
+ return 1
+ }
+ if po == "" {
+ return -1
+ }
+
+ return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements JSON.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// MarshalJSON implements JSON.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (v *Version) UnmarshalText(text []byte) error {
+ temp, err := NewVersion(string(text))
+ if err != nil {
+ return err
+ }
+
+ *v = *temp
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (v Version) MarshalText() ([]byte, error) {
+ return []byte(v.String()), nil
+}
+
+// Scan implements the SQL.Scanner interface.
+func (v *Version) Scan(value interface{}) error {
+ var s string
+ s, _ = value.(string)
+ temp, err := NewVersion(s)
+ if err != nil {
+ return err
+ }
+ v.major = temp.major
+ v.minor = temp.minor
+ v.patch = temp.patch
+ v.pre = temp.pre
+ v.metadata = temp.metadata
+ v.original = temp.original
+ return nil
+}
+
+// Value implements the Driver.Valuer interface.
+func (v Version) Value() (driver.Value, error) {
+ return v.String(), nil
+}
+
+func compareSegment(v, o uint64) int {
+ if v < o {
+ return -1
+ }
+ if v > o {
+ return 1
+ }
+
+ return 0
+}
+
+func comparePrerelease(v, o string) int {
+ // split the prelease versions by their part. The separator, per the spec,
+ // is a .
+ sparts := strings.Split(v, ".")
+ oparts := strings.Split(o, ".")
+
+ // Find the longer length of the parts to know how many loop iterations to
+ // go through.
+ slen := len(sparts)
+ olen := len(oparts)
+
+ l := slen
+ if olen > slen {
+ l = olen
+ }
+
+ // Iterate over each part of the prereleases to compare the differences.
+ for i := 0; i < l; i++ {
+ // Since the lentgh of the parts can be different we need to create
+ // a placeholder. This is to avoid out of bounds issues.
+ stemp := ""
+ if i < slen {
+ stemp = sparts[i]
+ }
+
+ otemp := ""
+ if i < olen {
+ otemp = oparts[i]
+ }
+
+ d := comparePrePart(stemp, otemp)
+ if d != 0 {
+ return d
+ }
+ }
+
+ // Reaching here means two versions are of equal value but have different
+ // metadata (the part following a +). They are not identical in string form
+ // but the version comparison finds them to be equal.
+ return 0
+}
+
+func comparePrePart(s, o string) int {
+ // Fastpath if they are equal
+ if s == o {
+ return 0
+ }
+
+ // When s or o are empty we can use the other in an attempt to determine
+ // the response.
+ if s == "" {
+ if o != "" {
+ return -1
+ }
+ return 1
+ }
+
+ if o == "" {
+ if s != "" {
+ return 1
+ }
+ return -1
+ }
+
+ // When comparing strings "99" is greater than "103". To handle
+ // cases like this we need to detect numbers and compare them. According
+ // to the semver spec, numbers are always positive. If there is a - at the
+ // start like -99 this is to be evaluated as an alphanum. numbers always
+ // have precedence over alphanum. Parsing as Uints because negative numbers
+ // are ignored.
+
+ oi, n1 := strconv.ParseUint(o, 10, 64)
+ si, n2 := strconv.ParseUint(s, 10, 64)
+
+ // The case where both are strings compare the strings
+ if n1 != nil && n2 != nil {
+ if s > o {
+ return 1
+ }
+ return -1
+ } else if n1 != nil {
+ // o is a string and s is a number
+ return -1
+ } else if n2 != nil {
+ // s is a string and o is a number
+ return 1
+ }
+ // Both are numbers
+ if si > oi {
+ return 1
+ }
+ return -1
+}
+
+// Like strings.ContainsAny but does an only instead of any.
+func containsOnly(s string, comp string) bool {
+ return strings.IndexFunc(s, func(r rune) bool {
+ return !strings.ContainsRune(comp, r)
+ }) == -1
+}
+
+// From the spec, "Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty.
+// Numeric identifiers MUST NOT include leading zeroes.". These segments can
+// be dot separated.
+func validatePrerelease(p string) error {
+ eparts := strings.Split(p, ".")
+ for _, p := range eparts {
+ if containsOnly(p, num) {
+ if len(p) > 1 && p[0] == '0' {
+ return ErrSegmentStartsZero
+ }
+ } else if !containsOnly(p, allowed) {
+ return ErrInvalidPrerelease
+ }
+ }
+
+ return nil
+}
+
+// From the spec, "Build metadata MAY be denoted by
+// appending a plus sign and a series of dot separated identifiers immediately
+// following the patch or pre-release version. Identifiers MUST comprise only
+// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
+func validateMetadata(m string) error {
+ eparts := strings.Split(m, ".")
+ for _, p := range eparts {
+ if !containsOnly(p, allowed) {
+ return ErrInvalidMetadata
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore
new file mode 100644
index 000000000..5e3002f88
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+/.glide
diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
new file mode 100644
index 000000000..2ce45dd4e
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md
@@ -0,0 +1,383 @@
+# Changelog
+
+## Release 3.2.3 (2022-11-29)
+
+### Changed
+
+- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi)
+- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero)
+- #353: Updated masterminds/semver which included bug fixes
+- #354: Updated golang.org/x/crypto which included bug fixes
+
+## Release 3.2.2 (2021-02-04)
+
+This is a re-release of 3.2.1 to satisfy something with the Go module system.
+
+## Release 3.2.1 (2021-02-04)
+
+### Changed
+
+- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr)
+
+## Release 3.2.0 (2020-12-14)
+
+### Added
+
+- #211: Added randInt function (thanks @kochurovro)
+- #223: Added fromJson and mustFromJson functions (thanks @mholt)
+- #242: Added a bcrypt function (thanks @robbiet480)
+- #253: Added randBytes function (thanks @MikaelSmith)
+- #254: Added dig function for dicts (thanks @nyarly)
+- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton)
+- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl)
+- #268: Added and and all functions for testing conditions (thanks @phuslu)
+- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf
+ (thanks @andrewmostello)
+- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek)
+- #270: Extend certificate functions to handle non-RSA keys + add support for
+ ed25519 keys (thanks @misberner)
+
+### Changed
+
+- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer
+- Using semver 3.1.1 and mergo 0.3.11
+
+### Fixed
+
+- #249: Fix htmlDateInZone example (thanks @spawnia)
+
+NOTE: The dependency github.com/imdario/mergo reverted the breaking change in
+0.3.9 via 0.3.10 release.
+
+## Release 3.1.0 (2020-04-16)
+
+NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9
+that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8.
+
+### Added
+
+- #225: Added support for generating htpasswd hash (thanks @rustycl0ck)
+- #224: Added duration filter (thanks @frebib)
+- #205: Added `seq` function (thanks @thadc23)
+
+### Changed
+
+- #203: Unlambda functions with correct signature (thanks @muesli)
+- #236: Updated the license formatting for GitHub display purposes
+- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9
+ as it causes a breaking change for sprig. That issue is tracked at
+ https://github.com/imdario/mergo/issues/139
+
+### Fixed
+
+- #229: Fix `seq` example in docs (thanks @kalmant)
+
+## Release 3.0.2 (2019-12-13)
+
+### Fixed
+
+- #220: Updating to semver v3.0.3 to fix issue with <= ranges
+- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya)
+
+## Release 3.0.1 (2019-12-08)
+
+### Fixed
+
+- #212: Updated semver fixing broken constraint checking with ^0.0
+
+## Release 3.0.0 (2019-10-02)
+
+### Added
+
+- #187: Added durationRound function (thanks @yjp20)
+- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn)
+- #193: Added toRawJson support (thanks @Dean-Coakley)
+- #197: Added get support to dicts (thanks @Dean-Coakley)
+
+### Changed
+
+- #186: Moving dependency management to Go modules
+- #186: Updated semver to v3. This has changes in the way ^ is handled
+- #194: Updated documentation on merging and how it copies. Added example using deepCopy
+- #196: trunc now supports negative values (thanks @Dean-Coakley)
+
+## Release 2.22.0 (2019-10-02)
+
+### Added
+
+- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos)
+- #195: Added deepCopy function for use with dicts
+
+### Changed
+
+- Updated merge and mergeOverwrite documentation to explain copying and how to
+ use deepCopy with it
+
+## Release 2.21.0 (2019-09-18)
+
+### Added
+
+- #122: Added encryptAES/decryptAES functions (thanks @n0madic)
+- #128: Added toDecimal support (thanks @Dean-Coakley)
+- #169: Added list contcat (thanks @astorath)
+- #174: Added deepEqual function (thanks @bonifaido)
+- #170: Added url parse and join functions (thanks @astorath)
+
+### Changed
+
+- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify
+
+### Fixed
+
+- #172: Fix semver wildcard example (thanks @piepmatz)
+- #175: Fix dateInZone doc example (thanks @s3than)
+
+## Release 2.20.0 (2019-06-18)
+
+### Added
+
+- #164: Adding function to get unix epoch for a time (@mattfarina)
+- #166: Adding tests for date_in_zone (@mattfarina)
+
+### Changed
+
+- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam)
+- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19)
+- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan)
+
+### Fixed
+
+## Release 2.19.0 (2019-03-02)
+
+IMPORTANT: This release reverts a change from 2.18.0
+
+In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random.
+
+We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience.
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds alder32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, the removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages genertion
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time from `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+ switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
new file mode 100644
index 000000000..f311b1eaa
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile
new file mode 100644
index 000000000..78d409cde
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/Makefile
@@ -0,0 +1,9 @@
+.PHONY: test
+test:
+ @echo "==> Running tests"
+ GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+ @echo "==> Running Tests with coverage"
+ GO111MODULE=on go test -cover .
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
new file mode 100644
index 000000000..3e22c60e1
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/README.md
@@ -0,0 +1,100 @@
+# Sprig: Template functions for Go templates
+
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but not
+very many template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## IMPORTANT NOTES
+
+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
+its v0.3.9 release, there was a behavior change that impacts merging template
+functions in sprig. It is currently recommended to use v0.3.10 or later of that package.
+Using v0.3.9 will cause sprig tests to fail.
+
+## Package Versions
+
+There are two active major versions of the `sprig` package.
+
+* v3 is currently stable release series on the `master` branch. The Go API should
+ remain compatible with v2, the current stable version. Behavior change behind
+ some functions is the reason for the new major version.
+* v2 is the previous stable release series. It has been more than three years since
+ the initial release of v2. You can read the documentation and see the code
+ on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
+ Bug fixes to this major version will continue for some time.
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+
+import (
+ "github.com/Masterminds/sprig/v3"
+ "html/template"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+ template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html")
+)
+
+
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+```
+
+## Principles Driving Our Function Selection
+
+We followed these principles to decide which functions to add and how to implement them:
+
+- Use template functions to build layout. The following
+ types of operations are within the domain of template functions:
+ - Formatting
+ - Layout
+ - Simple type conversions
+ - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic)
+- Template functions should not return errors unless there is no way to print
+ a sensible value. For example, converting a string to an integer should not
+ produce an error if conversion fails. Instead, it should display a default
+ value.
+- Simple math is necessary for grid layouts, pagers, and so on. Complex math
+ (anything other than arithmetic) should be done outside of templates.
+- Template functions only deal with the data passed into them. They never retrieve
+ data from a source.
+- Finally, do not override core Go template functions.
diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go
new file mode 100644
index 000000000..13a5cd559
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go
@@ -0,0 +1,653 @@
+package sprig
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/elliptic"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "hash/adler32"
+ "io"
+ "math/big"
+ "net"
+ "time"
+
+ "strings"
+
+ "github.com/google/uuid"
+ bcrypt_lib "golang.org/x/crypto/bcrypt"
+ "golang.org/x/crypto/scrypt"
+)
+
+func sha256sum(input string) string {
+ hash := sha256.Sum256([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func sha1sum(input string) string {
+ hash := sha1.Sum([]byte(input))
+ return hex.EncodeToString(hash[:])
+}
+
+func adler32sum(input string) string {
+ hash := adler32.Checksum([]byte(input))
+ return fmt.Sprintf("%d", hash)
+}
+
+func bcrypt(input string) string {
+ hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost)
+ if err != nil {
+ return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err)
+ }
+
+ return string(hash)
+}
+
+func htpasswd(username string, password string) string {
+ if strings.Contains(username, ":") {
+ return fmt.Sprintf("invalid username: %s", username)
+ }
+ return fmt.Sprintf("%s:%s", username, bcrypt(password))
+}
+
+func randBytes(count int) (string, error) {
+ buf := make([]byte, count)
+ if _, err := rand.Read(buf); err != nil {
+ return "", err
+ }
+ return base64.StdEncoding.EncodeToString(buf), nil
+}
+
+// uuidv4 provides a safe and secure UUID v4 implementation
+func uuidv4() string {
+ return uuid.New().String()
+}
+
+var masterPasswordSeed = "com.lyndir.masterpassword"
+
+var passwordTypeTemplates = map[string][][]byte{
+ "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")},
+ "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"),
+ []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"),
+ []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"),
+ []byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"),
+ []byte("CvccCvcvCvccno")},
+ "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")},
+ "short": {[]byte("Cvcn")},
+ "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")},
+ "pin": {[]byte("nnnn")},
+}
+
+var templateCharacters = map[byte]string{
+ 'V': "AEIOU",
+ 'C': "BCDFGHJKLMNPQRSTVWXYZ",
+ 'v': "aeiou",
+ 'c': "bcdfghjklmnpqrstvwxyz",
+ 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ",
+ 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz",
+ 'n': "0123456789",
+ 'o': "@&%?,=[]_:-+*$#!'^~;()/.",
+ 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()",
+}
+
+func derivePassword(counter uint32, passwordType, password, user, site string) string {
+ var templates = passwordTypeTemplates[passwordType]
+ if templates == nil {
+ return fmt.Sprintf("cannot find password template %s", passwordType)
+ }
+
+ var buffer bytes.Buffer
+ buffer.WriteString(masterPasswordSeed)
+ binary.Write(&buffer, binary.BigEndian, uint32(len(user)))
+ buffer.WriteString(user)
+
+ salt := buffer.Bytes()
+ key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64)
+ if err != nil {
+ return fmt.Sprintf("failed to derive password: %s", err)
+ }
+
+ buffer.Truncate(len(masterPasswordSeed))
+ binary.Write(&buffer, binary.BigEndian, uint32(len(site)))
+ buffer.WriteString(site)
+ binary.Write(&buffer, binary.BigEndian, counter)
+
+ var hmacv = hmac.New(sha256.New, key)
+ hmacv.Write(buffer.Bytes())
+ var seed = hmacv.Sum(nil)
+ var temp = templates[int(seed[0])%len(templates)]
+
+ buffer.Truncate(0)
+ for i, element := range temp {
+ passChars := templateCharacters[element]
+ passChar := passChars[int(seed[i+1])%len(passChars)]
+ buffer.WriteByte(passChar)
+ }
+
+ return buffer.String()
+}
+
+func generatePrivateKey(typ string) string {
+ var priv interface{}
+ var err error
+ switch typ {
+ case "", "rsa":
+ // good enough for government work
+ priv, err = rsa.GenerateKey(rand.Reader, 4096)
+ case "dsa":
+ key := new(dsa.PrivateKey)
+ // again, good enough for government work
+ if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil {
+ return fmt.Sprintf("failed to generate dsa params: %s", err)
+ }
+ err = dsa.GenerateKey(key, rand.Reader)
+ priv = key
+ case "ecdsa":
+ // again, good enough for government work
+ priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+ case "ed25519":
+ _, priv, err = ed25519.GenerateKey(rand.Reader)
+ default:
+ return "Unknown type " + typ
+ }
+ if err != nil {
+ return fmt.Sprintf("failed to generate private key: %s", err)
+ }
+
+ return string(pem.EncodeToMemory(pemBlockForKey(priv)))
+}
+
+// DSAKeyFormat stores the format for DSA keys.
+// Used by pemBlockForKey
+type DSAKeyFormat struct {
+ Version int
+ P, Q, G, Y, X *big.Int
+}
+
+func pemBlockForKey(priv interface{}) *pem.Block {
+ switch k := priv.(type) {
+ case *rsa.PrivateKey:
+ return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)}
+ case *dsa.PrivateKey:
+ val := DSAKeyFormat{
+ P: k.P, Q: k.Q, G: k.G,
+ Y: k.Y, X: k.X,
+ }
+ bytes, _ := asn1.Marshal(val)
+ return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes}
+ case *ecdsa.PrivateKey:
+ b, _ := x509.MarshalECPrivateKey(k)
+ return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b}
+ default:
+ // attempt PKCS#8 format for all other keys
+ b, err := x509.MarshalPKCS8PrivateKey(k)
+ if err != nil {
+ return nil
+ }
+ return &pem.Block{Type: "PRIVATE KEY", Bytes: b}
+ }
+}
+
+func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) {
+ block, _ := pem.Decode([]byte(pemBlock))
+ if block == nil {
+ return nil, errors.New("no PEM data in input")
+ }
+
+ if block.Type == "PRIVATE KEY" {
+ priv, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err)
+ }
+ return priv, nil
+ } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") {
+ return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type)
+ }
+
+ switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY"
+ case "RSA":
+ priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err)
+ }
+ return priv, nil
+ case "EC":
+ priv, err := x509.ParseECPrivateKey(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("parsing EC private key from PEM: %s", err)
+ }
+ return priv, nil
+ case "DSA":
+ var k DSAKeyFormat
+ _, err := asn1.Unmarshal(block.Bytes, &k)
+ if err != nil {
+ return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err)
+ }
+ priv := &dsa.PrivateKey{
+ PublicKey: dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: k.P, Q: k.Q, G: k.G,
+ },
+ Y: k.Y,
+ },
+ X: k.X,
+ }
+ return priv, nil
+ default:
+ return nil, fmt.Errorf("invalid private key type %s", block.Type)
+ }
+}
+
+func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) {
+ switch k := priv.(type) {
+ case interface{ Public() crypto.PublicKey }:
+ return k.Public(), nil
+ case *dsa.PrivateKey:
+ return &k.PublicKey, nil
+ default:
+ return nil, fmt.Errorf("unable to get public key for type %T", priv)
+ }
+}
+
+type certificate struct {
+ Cert string
+ Key string
+}
+
+func buildCustomCertificate(b64cert string, b64key string) (certificate, error) {
+ crt := certificate{}
+
+ cert, err := base64.StdEncoding.DecodeString(b64cert)
+ if err != nil {
+ return crt, errors.New("unable to decode base64 certificate")
+ }
+
+ key, err := base64.StdEncoding.DecodeString(b64key)
+ if err != nil {
+ return crt, errors.New("unable to decode base64 private key")
+ }
+
+ decodedCert, _ := pem.Decode(cert)
+ if decodedCert == nil {
+ return crt, errors.New("unable to decode certificate")
+ }
+ _, err = x509.ParseCertificate(decodedCert.Bytes)
+ if err != nil {
+ return crt, fmt.Errorf(
+ "error parsing certificate: decodedCert.Bytes: %s",
+ err,
+ )
+ }
+
+ _, err = parsePrivateKeyPEM(string(key))
+ if err != nil {
+ return crt, fmt.Errorf(
+ "error parsing private key: %s",
+ err,
+ )
+ }
+
+ crt.Cert = string(cert)
+ crt.Key = string(key)
+
+ return crt, nil
+}
+
+func generateCertificateAuthority(
+ cn string,
+ daysValid int,
+) (certificate, error) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
+ }
+
+ return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv)
+}
+
+func generateCertificateAuthorityWithPEMKey(
+ cn string,
+ daysValid int,
+ privPEM string,
+) (certificate, error) {
+ priv, err := parsePrivateKeyPEM(privPEM)
+ if err != nil {
+ return certificate{}, fmt.Errorf("parsing private key: %s", err)
+ }
+ return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv)
+}
+
+func generateCertificateAuthorityWithKeyInternal(
+ cn string,
+ daysValid int,
+ priv crypto.PrivateKey,
+) (certificate, error) {
+ ca := certificate{}
+
+ template, err := getBaseCertTemplate(cn, nil, nil, daysValid)
+ if err != nil {
+ return ca, err
+ }
+ // Override KeyUsage and IsCA
+ template.KeyUsage = x509.KeyUsageKeyEncipherment |
+ x509.KeyUsageDigitalSignature |
+ x509.KeyUsageCertSign
+ template.IsCA = true
+
+ ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv)
+
+ return ca, err
+}
+
+func generateSelfSignedCertificate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+) (certificate, error) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
+ }
+ return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv)
+}
+
+func generateSelfSignedCertificateWithPEMKey(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ privPEM string,
+) (certificate, error) {
+ priv, err := parsePrivateKeyPEM(privPEM)
+ if err != nil {
+ return certificate{}, fmt.Errorf("parsing private key: %s", err)
+ }
+ return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv)
+}
+
+func generateSelfSignedCertificateWithKeyInternal(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ priv crypto.PrivateKey,
+) (certificate, error) {
+ cert := certificate{}
+
+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+ if err != nil {
+ return cert, err
+ }
+
+ cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv)
+
+ return cert, err
+}
+
+func generateSignedCertificate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ ca certificate,
+) (certificate, error) {
+ priv, err := rsa.GenerateKey(rand.Reader, 2048)
+ if err != nil {
+ return certificate{}, fmt.Errorf("error generating rsa key: %s", err)
+ }
+ return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv)
+}
+
+func generateSignedCertificateWithPEMKey(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ ca certificate,
+ privPEM string,
+) (certificate, error) {
+ priv, err := parsePrivateKeyPEM(privPEM)
+ if err != nil {
+ return certificate{}, fmt.Errorf("parsing private key: %s", err)
+ }
+ return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv)
+}
+
+func generateSignedCertificateWithKeyInternal(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+ ca certificate,
+ priv crypto.PrivateKey,
+) (certificate, error) {
+ cert := certificate{}
+
+ decodedSignerCert, _ := pem.Decode([]byte(ca.Cert))
+ if decodedSignerCert == nil {
+ return cert, errors.New("unable to decode certificate")
+ }
+ signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes)
+ if err != nil {
+ return cert, fmt.Errorf(
+ "error parsing certificate: decodedSignerCert.Bytes: %s",
+ err,
+ )
+ }
+ signerKey, err := parsePrivateKeyPEM(ca.Key)
+ if err != nil {
+ return cert, fmt.Errorf(
+ "error parsing private key: %s",
+ err,
+ )
+ }
+
+ template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid)
+ if err != nil {
+ return cert, err
+ }
+
+ cert.Cert, cert.Key, err = getCertAndKey(
+ template,
+ priv,
+ signerCert,
+ signerKey,
+ )
+
+ return cert, err
+}
+
+func getCertAndKey(
+ template *x509.Certificate,
+ signeeKey crypto.PrivateKey,
+ parent *x509.Certificate,
+ signingKey crypto.PrivateKey,
+) (string, string, error) {
+ signeePubKey, err := getPublicKey(signeeKey)
+ if err != nil {
+ return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err)
+ }
+ derBytes, err := x509.CreateCertificate(
+ rand.Reader,
+ template,
+ parent,
+ signeePubKey,
+ signingKey,
+ )
+ if err != nil {
+ return "", "", fmt.Errorf("error creating certificate: %s", err)
+ }
+
+ certBuffer := bytes.Buffer{}
+ if err := pem.Encode(
+ &certBuffer,
+ &pem.Block{Type: "CERTIFICATE", Bytes: derBytes},
+ ); err != nil {
+ return "", "", fmt.Errorf("error pem-encoding certificate: %s", err)
+ }
+
+ keyBuffer := bytes.Buffer{}
+ if err := pem.Encode(
+ &keyBuffer,
+ pemBlockForKey(signeeKey),
+ ); err != nil {
+ return "", "", fmt.Errorf("error pem-encoding key: %s", err)
+ }
+
+ return certBuffer.String(), keyBuffer.String(), nil
+}
+
+func getBaseCertTemplate(
+ cn string,
+ ips []interface{},
+ alternateDNS []interface{},
+ daysValid int,
+) (*x509.Certificate, error) {
+ ipAddresses, err := getNetIPs(ips)
+ if err != nil {
+ return nil, err
+ }
+ dnsNames, err := getAlternateDNSStrs(alternateDNS)
+ if err != nil {
+ return nil, err
+ }
+ serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound)
+ if err != nil {
+ return nil, err
+ }
+ return &x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{
+ CommonName: cn,
+ },
+ IPAddresses: ipAddresses,
+ DNSNames: dnsNames,
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)),
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+ ExtKeyUsage: []x509.ExtKeyUsage{
+ x509.ExtKeyUsageServerAuth,
+ x509.ExtKeyUsageClientAuth,
+ },
+ BasicConstraintsValid: true,
+ }, nil
+}
+
+func getNetIPs(ips []interface{}) ([]net.IP, error) {
+ if ips == nil {
+ return []net.IP{}, nil
+ }
+ var ipStr string
+ var ok bool
+ var netIP net.IP
+ netIPs := make([]net.IP, len(ips))
+ for i, ip := range ips {
+ ipStr, ok = ip.(string)
+ if !ok {
+ return nil, fmt.Errorf("error parsing ip: %v is not a string", ip)
+ }
+ netIP = net.ParseIP(ipStr)
+ if netIP == nil {
+ return nil, fmt.Errorf("error parsing ip: %s", ipStr)
+ }
+ netIPs[i] = netIP
+ }
+ return netIPs, nil
+}
+
+func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) {
+ if alternateDNS == nil {
+ return []string{}, nil
+ }
+ var dnsStr string
+ var ok bool
+ alternateDNSStrs := make([]string, len(alternateDNS))
+ for i, dns := range alternateDNS {
+ dnsStr, ok = dns.(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "error processing alternate dns name: %v is not a string",
+ dns,
+ )
+ }
+ alternateDNSStrs[i] = dnsStr
+ }
+ return alternateDNSStrs, nil
+}
+
+func encryptAES(password string, plaintext string) (string, error) {
+ if plaintext == "" {
+ return "", nil
+ }
+
+ key := make([]byte, 32)
+ copy(key, []byte(password))
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return "", err
+ }
+
+ content := []byte(plaintext)
+ blockSize := block.BlockSize()
+ padding := blockSize - len(content)%blockSize
+ padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+ content = append(content, padtext...)
+
+ ciphertext := make([]byte, aes.BlockSize+len(content))
+
+ iv := ciphertext[:aes.BlockSize]
+ if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+ return "", err
+ }
+
+ mode := cipher.NewCBCEncrypter(block, iv)
+ mode.CryptBlocks(ciphertext[aes.BlockSize:], content)
+
+ return base64.StdEncoding.EncodeToString(ciphertext), nil
+}
+
+func decryptAES(password string, crypt64 string) (string, error) {
+ if crypt64 == "" {
+ return "", nil
+ }
+
+ key := make([]byte, 32)
+ copy(key, []byte(password))
+
+ crypt, err := base64.StdEncoding.DecodeString(crypt64)
+ if err != nil {
+ return "", err
+ }
+
+ block, err := aes.NewCipher(key)
+ if err != nil {
+ return "", err
+ }
+
+ iv := crypt[:aes.BlockSize]
+ crypt = crypt[aes.BlockSize:]
+ decrypted := make([]byte, len(crypt))
+ mode := cipher.NewCBCDecrypter(block, iv)
+ mode.CryptBlocks(decrypted, crypt)
+
+ return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go
new file mode 100644
index 000000000..ed022ddac
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/date.go
@@ -0,0 +1,152 @@
+package sprig
+
+import (
+ "strconv"
+ "time"
+)
+
+// Given a format and a date, format the date string.
+//
+// Date can be a `time.Time` or an `int, int32, int64`.
+// In the later case, it is treated as seconds since UNIX
+// epoch.
+func date(fmt string, date interface{}) string {
+ return dateInZone(fmt, date, "Local")
+}
+
+func htmlDate(date interface{}) string {
+ return dateInZone("2006-01-02", date, "Local")
+}
+
+func htmlDateInZone(date interface{}, zone string) string {
+ return dateInZone("2006-01-02", date, zone)
+}
+
+func dateInZone(fmt string, date interface{}, zone string) string {
+ var t time.Time
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case *time.Time:
+ t = *date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ case int32:
+ t = time.Unix(int64(date), 0)
+ }
+
+ loc, err := time.LoadLocation(zone)
+ if err != nil {
+ loc, _ = time.LoadLocation("UTC")
+ }
+
+ return t.In(loc).Format(fmt)
+}
+
+func dateModify(fmt string, date time.Time) time.Time {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return date
+ }
+ return date.Add(d)
+}
+
+func mustDateModify(fmt string, date time.Time) (time.Time, error) {
+ d, err := time.ParseDuration(fmt)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return date.Add(d), nil
+}
+
+func dateAgo(date interface{}) string {
+ var t time.Time
+
+ switch date := date.(type) {
+ default:
+ t = time.Now()
+ case time.Time:
+ t = date
+ case int64:
+ t = time.Unix(date, 0)
+ case int:
+ t = time.Unix(int64(date), 0)
+ }
+ // Drop resolution to seconds
+ duration := time.Since(t).Round(time.Second)
+ return duration.String()
+}
+
+func duration(sec interface{}) string {
+ var n int64
+ switch value := sec.(type) {
+ default:
+ n = 0
+ case string:
+ n, _ = strconv.ParseInt(value, 10, 64)
+ case int64:
+ n = value
+ }
+ return (time.Duration(n) * time.Second).String()
+}
+
+func durationRound(duration interface{}) string {
+ var d time.Duration
+ switch duration := duration.(type) {
+ default:
+ d = 0
+ case string:
+ d, _ = time.ParseDuration(duration)
+ case int64:
+ d = time.Duration(duration)
+ case time.Time:
+ d = time.Since(duration)
+ }
+
+ u := uint64(d)
+ neg := d < 0
+ if neg {
+ u = -u
+ }
+
+ var (
+ year = uint64(time.Hour) * 24 * 365
+ month = uint64(time.Hour) * 24 * 30
+ day = uint64(time.Hour) * 24
+ hour = uint64(time.Hour)
+ minute = uint64(time.Minute)
+ second = uint64(time.Second)
+ )
+ switch {
+ case u > year:
+ return strconv.FormatUint(u/year, 10) + "y"
+ case u > month:
+ return strconv.FormatUint(u/month, 10) + "mo"
+ case u > day:
+ return strconv.FormatUint(u/day, 10) + "d"
+ case u > hour:
+ return strconv.FormatUint(u/hour, 10) + "h"
+ case u > minute:
+ return strconv.FormatUint(u/minute, 10) + "m"
+ case u > second:
+ return strconv.FormatUint(u/second, 10) + "s"
+ }
+ return "0s"
+}
+
+func toDate(fmt, str string) time.Time {
+ t, _ := time.ParseInLocation(fmt, str, time.Local)
+ return t
+}
+
+func mustToDate(fmt, str string) (time.Time, error) {
+ return time.ParseInLocation(fmt, str, time.Local)
+}
+
+func unixEpoch(date time.Time) string {
+ return strconv.FormatInt(date.Unix(), 10)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go
new file mode 100644
index 000000000..b9f979666
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go
@@ -0,0 +1,163 @@
+package sprig
+
+import (
+ "bytes"
+ "encoding/json"
+ "math/rand"
+ "reflect"
+ "strings"
+ "time"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// dfault checks whether `given` is set, and returns default if not set.
+//
+// This returns `d` if `given` appears not to be set, and `given` otherwise.
+//
+// For numeric types 0 is unset.
+// For strings, maps, arrays, and slices, len() = 0 is considered unset.
+// For bool, false is unset.
+// Structs are never considered unset.
+//
+// For everything else, including pointers, a nil value is unset.
+func dfault(d interface{}, given ...interface{}) interface{} {
+
+ if empty(given) || empty(given[0]) {
+ return d
+ }
+ return given[0]
+}
+
+// empty returns true if the given value has the zero value for its type.
+func empty(given interface{}) bool {
+ g := reflect.ValueOf(given)
+ if !g.IsValid() {
+ return true
+ }
+
+ // Basically adapted from text/template.isTrue
+ switch g.Kind() {
+ default:
+ return g.IsNil()
+ case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+ return g.Len() == 0
+ case reflect.Bool:
+ return !g.Bool()
+ case reflect.Complex64, reflect.Complex128:
+ return g.Complex() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return g.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return g.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return g.Float() == 0
+ case reflect.Struct:
+ return false
+ }
+}
+
+// coalesce returns the first non-empty value.
+func coalesce(v ...interface{}) interface{} {
+ for _, val := range v {
+ if !empty(val) {
+ return val
+ }
+ }
+ return nil
+}
+
+// all returns true if empty(x) is false for all values x in the list.
+// If the list is empty, return true.
+func all(v ...interface{}) bool {
+ for _, val := range v {
+ if empty(val) {
+ return false
+ }
+ }
+ return true
+}
+
+// any returns true if empty(x) is false for any x in the list.
+// If the list is empty, return false.
+func any(v ...interface{}) bool {
+ for _, val := range v {
+ if !empty(val) {
+ return true
+ }
+ }
+ return false
+}
+
+// fromJson decodes JSON into a structured value, ignoring errors.
+func fromJson(v string) interface{} {
+ output, _ := mustFromJson(v)
+ return output
+}
+
+// mustFromJson decodes JSON into a structured value, returning errors.
+func mustFromJson(v string) (interface{}, error) {
+ var output interface{}
+ err := json.Unmarshal([]byte(v), &output)
+ return output, err
+}
+
+// toJson encodes an item into a JSON string
+func toJson(v interface{}) string {
+ output, _ := json.Marshal(v)
+ return string(output)
+}
+
+func mustToJson(v interface{}) (string, error) {
+ output, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toPrettyJson encodes an item into a pretty (indented) JSON string
+func toPrettyJson(v interface{}) string {
+ output, _ := json.MarshalIndent(v, "", " ")
+ return string(output)
+}
+
+func mustToPrettyJson(v interface{}) (string, error) {
+ output, err := json.MarshalIndent(v, "", " ")
+ if err != nil {
+ return "", err
+ }
+ return string(output), nil
+}
+
+// toRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func toRawJson(v interface{}) string {
+ output, err := mustToRawJson(v)
+ if err != nil {
+ panic(err)
+ }
+ return string(output)
+}
+
+// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters.
+func mustToRawJson(v interface{}) (string, error) {
+ buf := new(bytes.Buffer)
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ err := enc.Encode(&v)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSuffix(buf.String(), "\n"), nil
+}
+
+// ternary returns the first value if the last value is true, otherwise returns the second value.
+func ternary(vt interface{}, vf interface{}, v bool) interface{} {
+ if v {
+ return vt
+ }
+
+ return vf
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go
new file mode 100644
index 000000000..ade889698
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/dict.go
@@ -0,0 +1,174 @@
+package sprig
+
+import (
+ "github.com/imdario/mergo"
+ "github.com/mitchellh/copystructure"
+)
+
+func get(d map[string]interface{}, key string) interface{} {
+ if val, ok := d[key]; ok {
+ return val
+ }
+ return ""
+}
+
+func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} {
+ d[key] = value
+ return d
+}
+
+func unset(d map[string]interface{}, key string) map[string]interface{} {
+ delete(d, key)
+ return d
+}
+
+func hasKey(d map[string]interface{}, key string) bool {
+ _, ok := d[key]
+ return ok
+}
+
+func pluck(key string, d ...map[string]interface{}) []interface{} {
+ res := []interface{}{}
+ for _, dict := range d {
+ if val, ok := dict[key]; ok {
+ res = append(res, val)
+ }
+ }
+ return res
+}
+
+func keys(dicts ...map[string]interface{}) []string {
+ k := []string{}
+ for _, dict := range dicts {
+ for key := range dict {
+ k = append(k, key)
+ }
+ }
+ return k
+}
+
+func pick(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+ for _, k := range keys {
+ if v, ok := dict[k]; ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func omit(dict map[string]interface{}, keys ...string) map[string]interface{} {
+ res := map[string]interface{}{}
+
+ omit := make(map[string]bool, len(keys))
+ for _, k := range keys {
+ omit[k] = true
+ }
+
+ for k, v := range dict {
+ if _, ok := omit[k]; !ok {
+ res[k] = v
+ }
+ }
+ return res
+}
+
+func dict(v ...interface{}) map[string]interface{} {
+ dict := map[string]interface{}{}
+ lenv := len(v)
+ for i := 0; i < lenv; i += 2 {
+ key := strval(v[i])
+ if i+1 >= lenv {
+ dict[key] = ""
+ continue
+ }
+ dict[key] = v[i+1]
+ }
+ return dict
+}
+
+func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
+ for _, src := range srcs {
+ if err := mergo.Merge(&dst, src); err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ }
+ return dst
+}
+
+func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
+ for _, src := range srcs {
+ if err := mergo.Merge(&dst, src); err != nil {
+ return nil, err
+ }
+ }
+ return dst, nil
+}
+
+func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} {
+ for _, src := range srcs {
+ if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+ // Swallow errors inside of a template.
+ return ""
+ }
+ }
+ return dst
+}
+
+func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) {
+ for _, src := range srcs {
+ if err := mergo.MergeWithOverwrite(&dst, src); err != nil {
+ return nil, err
+ }
+ }
+ return dst, nil
+}
+
+func values(dict map[string]interface{}) []interface{} {
+ values := []interface{}{}
+ for _, value := range dict {
+ values = append(values, value)
+ }
+
+ return values
+}
+
+func deepCopy(i interface{}) interface{} {
+ c, err := mustDeepCopy(i)
+ if err != nil {
+ panic("deepCopy error: " + err.Error())
+ }
+
+ return c
+}
+
+func mustDeepCopy(i interface{}) (interface{}, error) {
+ return copystructure.Copy(i)
+}
+
+func dig(ps ...interface{}) (interface{}, error) {
+ if len(ps) < 3 {
+ panic("dig needs at least three arguments")
+ }
+ dict := ps[len(ps)-1].(map[string]interface{})
+ def := ps[len(ps)-2]
+ ks := make([]string, len(ps)-2)
+ for i := 0; i < len(ks); i++ {
+ ks[i] = ps[i].(string)
+ }
+
+ return digFromDict(dict, def, ks)
+}
+
+func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) {
+ k, ns := ks[0], ks[1:len(ks)]
+ step, has := dict[k]
+ if !has {
+ return d, nil
+ }
+ if len(ns) == 0 {
+ return step, nil
+ }
+ return digFromDict(step.(map[string]interface{}), d, ns)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go
new file mode 100644
index 000000000..aabb9d448
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/doc.go
@@ -0,0 +1,19 @@
+/*
+Package sprig provides template functions for Go.
+
+This package contains a number of utility functions for working with data
+inside of Go `html/template` and `text/template` files.
+
+To add these functions, use the `template.Funcs()` method:
+
+ t := templates.New("foo").Funcs(sprig.FuncMap())
+
+Note that you should add the function map before you parse any template files.
+
+ In several cases, Sprig reverses the order of arguments from the way they
+ appear in the standard library. This is to make it easier to pipe
+ arguments into functions.
+
+See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions.
+*/
+package sprig
diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go
new file mode 100644
index 000000000..57fcec1d9
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/functions.go
@@ -0,0 +1,382 @@
+package sprig
+
+import (
+ "errors"
+ "html/template"
+ "math/rand"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "strconv"
+ "strings"
+ ttemplate "text/template"
+ "time"
+
+ util "github.com/Masterminds/goutils"
+ "github.com/huandu/xstrings"
+ "github.com/shopspring/decimal"
+)
+
+// FuncMap produces the function map.
+//
+// Use this to pass the functions into the template engine:
+//
+// tpl := template.New("foo").Funcs(sprig.FuncMap()))
+//
+func FuncMap() template.FuncMap {
+ return HtmlFuncMap()
+}
+
+// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions.
+func HermeticTxtFuncMap() ttemplate.FuncMap {
+ r := TxtFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions.
+func HermeticHtmlFuncMap() template.FuncMap {
+ r := HtmlFuncMap()
+ for _, name := range nonhermeticFunctions {
+ delete(r, name)
+ }
+ return r
+}
+
+// TxtFuncMap returns a 'text/template'.FuncMap
+func TxtFuncMap() ttemplate.FuncMap {
+ return ttemplate.FuncMap(GenericFuncMap())
+}
+
+// HtmlFuncMap returns an 'html/template'.Funcmap
+func HtmlFuncMap() template.FuncMap {
+ return template.FuncMap(GenericFuncMap())
+}
+
+// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}.
+func GenericFuncMap() map[string]interface{} {
+ gfm := make(map[string]interface{}, len(genericMap))
+ for k, v := range genericMap {
+ gfm[k] = v
+ }
+ return gfm
+}
+
+// These functions are not guaranteed to evaluate to the same result for given input, because they
+// refer to the environment or global state.
+var nonhermeticFunctions = []string{
+ // Date functions
+ "date",
+ "date_in_zone",
+ "date_modify",
+ "now",
+ "htmlDate",
+ "htmlDateInZone",
+ "dateInZone",
+ "dateModify",
+
+ // Strings
+ "randAlphaNum",
+ "randAlpha",
+ "randAscii",
+ "randNumeric",
+ "randBytes",
+ "uuidv4",
+
+ // OS
+ "env",
+ "expandenv",
+
+ // Network
+ "getHostByName",
+}
+
+var genericMap = map[string]interface{}{
+ "hello": func() string { return "Hello!" },
+
+ // Date functions
+ "ago": dateAgo,
+ "date": date,
+ "date_in_zone": dateInZone,
+ "date_modify": dateModify,
+ "dateInZone": dateInZone,
+ "dateModify": dateModify,
+ "duration": duration,
+ "durationRound": durationRound,
+ "htmlDate": htmlDate,
+ "htmlDateInZone": htmlDateInZone,
+ "must_date_modify": mustDateModify,
+ "mustDateModify": mustDateModify,
+ "mustToDate": mustToDate,
+ "now": time.Now,
+ "toDate": toDate,
+ "unixEpoch": unixEpoch,
+
+ // Strings
+ "abbrev": abbrev,
+ "abbrevboth": abbrevboth,
+ "trunc": trunc,
+ "trim": strings.TrimSpace,
+ "upper": strings.ToUpper,
+ "lower": strings.ToLower,
+ "title": strings.Title,
+ "untitle": untitle,
+ "substr": substring,
+ // Switch order so that "foo" | repeat 5
+ "repeat": func(count int, str string) string { return strings.Repeat(str, count) },
+ // Deprecated: Use trimAll.
+ "trimall": func(a, b string) string { return strings.Trim(b, a) },
+ // Switch order so that "$foo" | trimall "$"
+ "trimAll": func(a, b string) string { return strings.Trim(b, a) },
+ "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) },
+ "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) },
+ "nospace": util.DeleteWhiteSpace,
+ "initials": initials,
+ "randAlphaNum": randAlphaNumeric,
+ "randAlpha": randAlpha,
+ "randAscii": randAscii,
+ "randNumeric": randNumeric,
+ "swapcase": util.SwapCase,
+ "shuffle": xstrings.Shuffle,
+ "snakecase": xstrings.ToSnakeCase,
+ "camelcase": xstrings.ToCamelCase,
+ "kebabcase": xstrings.ToKebabCase,
+ "wrap": func(l int, s string) string { return util.Wrap(s, l) },
+ "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) },
+ // Switch order so that "foobar" | contains "foo"
+ "contains": func(substr string, str string) bool { return strings.Contains(str, substr) },
+ "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) },
+ "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) },
+ "quote": quote,
+ "squote": squote,
+ "cat": cat,
+ "indent": indent,
+ "nindent": nindent,
+ "replace": replace,
+ "plural": plural,
+ "sha1sum": sha1sum,
+ "sha256sum": sha256sum,
+ "adler32sum": adler32sum,
+ "toString": strval,
+
+ // Wrap Atoi to stop errors.
+ "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i },
+ "int64": toInt64,
+ "int": toInt,
+ "float64": toFloat64,
+ "seq": seq,
+ "toDecimal": toDecimal,
+
+ //"gt": func(a, b int) bool {return a > b},
+ //"gte": func(a, b int) bool {return a >= b},
+ //"lt": func(a, b int) bool {return a < b},
+ //"lte": func(a, b int) bool {return a <= b},
+
+ // split "/" foo/bar returns map[int]string{0: foo, 1: bar}
+ "split": split,
+ "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) },
+ // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu}
+ "splitn": splitn,
+ "toStrings": strslice,
+
+ "until": until,
+ "untilStep": untilStep,
+
+ // VERY basic arithmetic.
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 },
+ "add": func(i ...interface{}) int64 {
+ var a int64 = 0
+ for _, b := range i {
+ a += toInt64(b)
+ }
+ return a
+ },
+ "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) },
+ "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) },
+ "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) },
+ "mul": func(a interface{}, v ...interface{}) int64 {
+ val := toInt64(a)
+ for _, b := range v {
+ val = val * toInt64(b)
+ }
+ return val
+ },
+ "randInt": func(min, max int) int { return rand.Intn(max-min) + min },
+ "add1f": func(i interface{}) float64 {
+ return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) })
+ },
+ "addf": func(i ...interface{}) float64 {
+ a := interface{}(float64(0))
+ return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) })
+ },
+ "subf": func(a interface{}, v ...interface{}) float64 {
+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) })
+ },
+ "divf": func(a interface{}, v ...interface{}) float64 {
+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) })
+ },
+ "mulf": func(a interface{}, v ...interface{}) float64 {
+ return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) })
+ },
+ "biggest": max,
+ "max": max,
+ "min": min,
+ "maxf": maxf,
+ "minf": minf,
+ "ceil": ceil,
+ "floor": floor,
+ "round": round,
+
+ // string slices. Note that we reverse the order b/c that's better
+ // for template processing.
+ "join": join,
+ "sortAlpha": sortAlpha,
+
+ // Defaults
+ "default": dfault,
+ "empty": empty,
+ "coalesce": coalesce,
+ "all": all,
+ "any": any,
+ "compact": compact,
+ "mustCompact": mustCompact,
+ "fromJson": fromJson,
+ "toJson": toJson,
+ "toPrettyJson": toPrettyJson,
+ "toRawJson": toRawJson,
+ "mustFromJson": mustFromJson,
+ "mustToJson": mustToJson,
+ "mustToPrettyJson": mustToPrettyJson,
+ "mustToRawJson": mustToRawJson,
+ "ternary": ternary,
+ "deepCopy": deepCopy,
+ "mustDeepCopy": mustDeepCopy,
+
+ // Reflection
+ "typeOf": typeOf,
+ "typeIs": typeIs,
+ "typeIsLike": typeIsLike,
+ "kindOf": kindOf,
+ "kindIs": kindIs,
+ "deepEqual": reflect.DeepEqual,
+
+ // OS:
+ "env": os.Getenv,
+ "expandenv": os.ExpandEnv,
+
+ // Network:
+ "getHostByName": getHostByName,
+
+ // Paths:
+ "base": path.Base,
+ "dir": path.Dir,
+ "clean": path.Clean,
+ "ext": path.Ext,
+ "isAbs": path.IsAbs,
+
+ // Filepaths:
+ "osBase": filepath.Base,
+ "osClean": filepath.Clean,
+ "osDir": filepath.Dir,
+ "osExt": filepath.Ext,
+ "osIsAbs": filepath.IsAbs,
+
+ // Encoding:
+ "b64enc": base64encode,
+ "b64dec": base64decode,
+ "b32enc": base32encode,
+ "b32dec": base32decode,
+
+ // Data Structures:
+ "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable.
+ "list": list,
+ "dict": dict,
+ "get": get,
+ "set": set,
+ "unset": unset,
+ "hasKey": hasKey,
+ "pluck": pluck,
+ "keys": keys,
+ "pick": pick,
+ "omit": omit,
+ "merge": merge,
+ "mergeOverwrite": mergeOverwrite,
+ "mustMerge": mustMerge,
+ "mustMergeOverwrite": mustMergeOverwrite,
+ "values": values,
+
+ "append": push, "push": push,
+ "mustAppend": mustPush, "mustPush": mustPush,
+ "prepend": prepend,
+ "mustPrepend": mustPrepend,
+ "first": first,
+ "mustFirst": mustFirst,
+ "rest": rest,
+ "mustRest": mustRest,
+ "last": last,
+ "mustLast": mustLast,
+ "initial": initial,
+ "mustInitial": mustInitial,
+ "reverse": reverse,
+ "mustReverse": mustReverse,
+ "uniq": uniq,
+ "mustUniq": mustUniq,
+ "without": without,
+ "mustWithout": mustWithout,
+ "has": has,
+ "mustHas": mustHas,
+ "slice": slice,
+ "mustSlice": mustSlice,
+ "concat": concat,
+ "dig": dig,
+ "chunk": chunk,
+ "mustChunk": mustChunk,
+
+ // Crypto:
+ "bcrypt": bcrypt,
+ "htpasswd": htpasswd,
+ "genPrivateKey": generatePrivateKey,
+ "derivePassword": derivePassword,
+ "buildCustomCert": buildCustomCertificate,
+ "genCA": generateCertificateAuthority,
+ "genCAWithKey": generateCertificateAuthorityWithPEMKey,
+ "genSelfSignedCert": generateSelfSignedCertificate,
+ "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey,
+ "genSignedCert": generateSignedCertificate,
+ "genSignedCertWithKey": generateSignedCertificateWithPEMKey,
+ "encryptAES": encryptAES,
+ "decryptAES": decryptAES,
+ "randBytes": randBytes,
+
+ // UUIDs:
+ "uuidv4": uuidv4,
+
+ // SemVer:
+ "semver": semver,
+ "semverCompare": semverCompare,
+
+ // Flow Control:
+ "fail": func(msg string) (string, error) { return "", errors.New(msg) },
+
+ // Regex
+ "regexMatch": regexMatch,
+ "mustRegexMatch": mustRegexMatch,
+ "regexFindAll": regexFindAll,
+ "mustRegexFindAll": mustRegexFindAll,
+ "regexFind": regexFind,
+ "mustRegexFind": mustRegexFind,
+ "regexReplaceAll": regexReplaceAll,
+ "mustRegexReplaceAll": mustRegexReplaceAll,
+ "regexReplaceAllLiteral": regexReplaceAllLiteral,
+ "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral,
+ "regexSplit": regexSplit,
+ "mustRegexSplit": mustRegexSplit,
+ "regexQuoteMeta": regexQuoteMeta,
+
+ // URLs:
+ "urlParse": urlParse,
+ "urlJoin": urlJoin,
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go
new file mode 100644
index 000000000..ca0fbb789
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/list.go
@@ -0,0 +1,464 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// Reflection is used in these functions so that slices and arrays of strings,
+// ints, and other types not implementing []interface{} can be worked with.
+// For example, this is useful if you need to work on the output of regexs.
+
+func list(v ...interface{}) []interface{} {
+ return v
+}
+
+func push(list interface{}, v interface{}) []interface{} {
+ l, err := mustPush(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPush(list interface{}, v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append(nl, v), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot push on type %s", tp)
+ }
+}
+
+func prepend(list interface{}, v interface{}) []interface{} {
+ l, err := mustPrepend(list, v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) {
+ //return append([]interface{}{v}, list...)
+
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return append([]interface{}{v}, nl...), nil
+
+ default:
+ return nil, fmt.Errorf("Cannot prepend on type %s", tp)
+ }
+}
+
+func chunk(size int, list interface{}) [][]interface{} {
+ l, err := mustChunk(size, list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustChunk(size int, list interface{}) ([][]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+
+ cs := int(math.Floor(float64(l-1)/float64(size)) + 1)
+ nl := make([][]interface{}, cs)
+
+ for i := 0; i < cs; i++ {
+ clen := size
+ if i == cs-1 {
+ clen = int(math.Floor(math.Mod(float64(l), float64(size))))
+ if clen == 0 {
+ clen = size
+ }
+ }
+
+ nl[i] = make([]interface{}, clen)
+
+ for j := 0; j < clen; j++ {
+ ix := i*size + j
+ nl[i][j] = l2.Index(ix).Interface()
+ }
+ }
+
+ return nl, nil
+
+ default:
+ return nil, fmt.Errorf("Cannot chunk type %s", tp)
+ }
+}
+
+func last(list interface{}) interface{} {
+ l, err := mustLast(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustLast(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(l - 1).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find last on type %s", tp)
+ }
+}
+
+func first(list interface{}) interface{} {
+ l, err := mustFirst(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustFirst(list interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ return l2.Index(0).Interface(), nil
+ default:
+ return nil, fmt.Errorf("Cannot find first on type %s", tp)
+ }
+}
+
+func rest(list interface{}) []interface{} {
+ l, err := mustRest(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustRest(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 1; i < l; i++ {
+ nl[i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find rest on type %s", tp)
+ }
+}
+
+func initial(list interface{}) []interface{} {
+ l, err := mustInitial(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustInitial(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ nl := make([]interface{}, l-1)
+ for i := 0; i < l-1; i++ {
+ nl[i] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find initial on type %s", tp)
+ }
+}
+
+func sortAlpha(list interface{}) []string {
+ k := reflect.Indirect(reflect.ValueOf(list)).Kind()
+ switch k {
+ case reflect.Slice, reflect.Array:
+ a := strslice(list)
+ s := sort.StringSlice(a)
+ s.Sort()
+ return s
+ }
+ return []string{strval(list)}
+}
+
+func reverse(v interface{}) []interface{} {
+ l, err := mustReverse(v)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustReverse(v interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(v).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(v)
+
+ l := l2.Len()
+ // We do not sort in place because the incoming array should not be altered.
+ nl := make([]interface{}, l)
+ for i := 0; i < l; i++ {
+ nl[l-i-1] = l2.Index(i).Interface()
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot find reverse on type %s", tp)
+ }
+}
+
+func compact(list interface{}) []interface{} {
+ l, err := mustCompact(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustCompact(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ nl := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !empty(item) {
+ nl = append(nl, item)
+ }
+ }
+
+ return nl, nil
+ default:
+ return nil, fmt.Errorf("Cannot compact on type %s", tp)
+ }
+}
+
+func uniq(list interface{}) []interface{} {
+ l, err := mustUniq(list)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustUniq(list interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ dest := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(dest, item) {
+ dest = append(dest, item)
+ }
+ }
+
+ return dest, nil
+ default:
+ return nil, fmt.Errorf("Cannot find uniq on type %s", tp)
+ }
+}
+
+func inList(haystack []interface{}, needle interface{}) bool {
+ for _, h := range haystack {
+ if reflect.DeepEqual(needle, h) {
+ return true
+ }
+ }
+ return false
+}
+
+func without(list interface{}, omit ...interface{}) []interface{} {
+ l, err := mustWithout(list, omit...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ res := []interface{}{}
+ var item interface{}
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if !inList(omit, item) {
+ res = append(res, item)
+ }
+ }
+
+ return res, nil
+ default:
+ return nil, fmt.Errorf("Cannot find without on type %s", tp)
+ }
+}
+
+func has(needle interface{}, haystack interface{}) bool {
+ l, err := mustHas(needle, haystack)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustHas(needle interface{}, haystack interface{}) (bool, error) {
+ if haystack == nil {
+ return false, nil
+ }
+ tp := reflect.TypeOf(haystack).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(haystack)
+ var item interface{}
+ l := l2.Len()
+ for i := 0; i < l; i++ {
+ item = l2.Index(i).Interface()
+ if reflect.DeepEqual(needle, item) {
+ return true, nil
+ }
+ }
+
+ return false, nil
+ default:
+ return false, fmt.Errorf("Cannot find has on type %s", tp)
+ }
+}
+
+// $list := [1, 2, 3, 4, 5]
+// slice $list -> list[0:5] = list[:]
+// slice $list 0 3 -> list[0:3] = list[:3]
+// slice $list 3 5 -> list[3:5]
+// slice $list 3 -> list[3:5] = list[3:]
+func slice(list interface{}, indices ...interface{}) interface{} {
+ l, err := mustSlice(list, indices...)
+ if err != nil {
+ panic(err)
+ }
+
+ return l
+}
+
+func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+
+ l := l2.Len()
+ if l == 0 {
+ return nil, nil
+ }
+
+ var start, end int
+ if len(indices) > 0 {
+ start = toInt(indices[0])
+ }
+ if len(indices) < 2 {
+ end = l
+ } else {
+ end = toInt(indices[1])
+ }
+
+ return l2.Slice(start, end).Interface(), nil
+ default:
+ return nil, fmt.Errorf("list should be type of slice or array but %s", tp)
+ }
+}
+
+func concat(lists ...interface{}) interface{} {
+ var res []interface{}
+ for _, list := range lists {
+ tp := reflect.TypeOf(list).Kind()
+ switch tp {
+ case reflect.Slice, reflect.Array:
+ l2 := reflect.ValueOf(list)
+ for i := 0; i < l2.Len(); i++ {
+ res = append(res, l2.Index(i).Interface())
+ }
+ default:
+ panic(fmt.Sprintf("Cannot concat type %s as list", tp))
+ }
+ }
+ return res
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go
new file mode 100644
index 000000000..108d78a94
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/network.go
@@ -0,0 +1,12 @@
+package sprig
+
+import (
+ "math/rand"
+ "net"
+)
+
+func getHostByName(name string) string {
+ addrs, _ := net.LookupHost(name)
+ //TODO: add error handing when release v3 comes out
+ return addrs[rand.Intn(len(addrs))]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go
new file mode 100644
index 000000000..f68e4182e
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go
@@ -0,0 +1,186 @@
+package sprig
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/cast"
+ "github.com/shopspring/decimal"
+)
+
+// toFloat64 converts 64-bit floats
+func toFloat64(v interface{}) float64 {
+ return cast.ToFloat64(v)
+}
+
+func toInt(v interface{}) int {
+ return cast.ToInt(v)
+}
+
+// toInt64 converts integer types to 64-bit integers
+func toInt64(v interface{}) int64 {
+ return cast.ToInt64(v)
+}
+
+func max(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb > aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func maxf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Max(aa, bb)
+ }
+ return aa
+}
+
+func min(a interface{}, i ...interface{}) int64 {
+ aa := toInt64(a)
+ for _, b := range i {
+ bb := toInt64(b)
+ if bb < aa {
+ aa = bb
+ }
+ }
+ return aa
+}
+
+func minf(a interface{}, i ...interface{}) float64 {
+ aa := toFloat64(a)
+ for _, b := range i {
+ bb := toFloat64(b)
+ aa = math.Min(aa, bb)
+ }
+ return aa
+}
+
+func until(count int) []int {
+ step := 1
+ if count < 0 {
+ step = -1
+ }
+ return untilStep(0, count, step)
+}
+
+func untilStep(start, stop, step int) []int {
+ v := []int{}
+
+ if stop < start {
+ if step >= 0 {
+ return v
+ }
+ for i := start; i > stop; i += step {
+ v = append(v, i)
+ }
+ return v
+ }
+
+ if step <= 0 {
+ return v
+ }
+ for i := start; i < stop; i += step {
+ v = append(v, i)
+ }
+ return v
+}
+
+func floor(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Floor(aa)
+}
+
+func ceil(a interface{}) float64 {
+ aa := toFloat64(a)
+ return math.Ceil(aa)
+}
+
+func round(a interface{}, p int, rOpt ...float64) float64 {
+ roundOn := .5
+ if len(rOpt) > 0 {
+ roundOn = rOpt[0]
+ }
+ val := toFloat64(a)
+ places := toFloat64(p)
+
+ var round float64
+ pow := math.Pow(10, places)
+ digit := pow * val
+ _, div := math.Modf(digit)
+ if div >= roundOn {
+ round = math.Ceil(digit)
+ } else {
+ round = math.Floor(digit)
+ }
+ return round / pow
+}
+
+// converts unix octal to decimal
+func toDecimal(v interface{}) int64 {
+ result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64)
+ if err != nil {
+ return 0
+ }
+ return result
+}
+
+func seq(params ...int) string {
+ increment := 1
+ switch len(params) {
+ case 0:
+ return ""
+ case 1:
+ start := 1
+ end := params[0]
+ if end < start {
+ increment = -1
+ }
+ return intArrayToString(untilStep(start, end+increment, increment), " ")
+ case 3:
+ start := params[0]
+ end := params[2]
+ step := params[1]
+ if end < start {
+ increment = -1
+ if step > 0 {
+ return ""
+ }
+ }
+ return intArrayToString(untilStep(start, end+increment, step), " ")
+ case 2:
+ start := params[0]
+ end := params[1]
+ step := 1
+ if end < start {
+ step = -1
+ }
+ return intArrayToString(untilStep(start, end+step, step), " ")
+ default:
+ return ""
+ }
+}
+
+func intArrayToString(slice []int, delimeter string) string {
+ return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]")
+}
+
+// performs a float and subsequent decimal.Decimal conversion on inputs,
+// and iterates through a and b executing the mathmetical operation f
+func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 {
+ prt := decimal.NewFromFloat(toFloat64(a))
+ for _, x := range b {
+ dx := decimal.NewFromFloat(toFloat64(x))
+ prt = f(prt, dx)
+ }
+ rslt, _ := prt.Float64()
+ return rslt
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go
new file mode 100644
index 000000000..8a65c132f
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go
@@ -0,0 +1,28 @@
+package sprig
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// typeIs returns true if the src is the type named in target.
+func typeIs(target string, src interface{}) bool {
+ return target == typeOf(src)
+}
+
+func typeIsLike(target string, src interface{}) bool {
+ t := typeOf(src)
+ return target == t || "*"+target == t
+}
+
+func typeOf(src interface{}) string {
+ return fmt.Sprintf("%T", src)
+}
+
+func kindIs(target string, src interface{}) bool {
+ return target == kindOf(src)
+}
+
+func kindOf(src interface{}) string {
+ return reflect.ValueOf(src).Kind().String()
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go
new file mode 100644
index 000000000..fab551018
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/regex.go
@@ -0,0 +1,83 @@
+package sprig
+
+import (
+ "regexp"
+)
+
+func regexMatch(regex string, s string) bool {
+ match, _ := regexp.MatchString(regex, s)
+ return match
+}
+
+func mustRegexMatch(regex string, s string) (bool, error) {
+ return regexp.MatchString(regex, s)
+}
+
+func regexFindAll(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.FindAllString(s, n)
+}
+
+func mustRegexFindAll(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.FindAllString(s, n), nil
+}
+
+func regexFind(regex string, s string) string {
+ r := regexp.MustCompile(regex)
+ return r.FindString(s)
+}
+
+func mustRegexFind(regex string, s string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.FindString(s), nil
+}
+
+func regexReplaceAll(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllString(s, repl)
+}
+
+func mustRegexReplaceAll(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllString(s, repl), nil
+}
+
+func regexReplaceAllLiteral(regex string, s string, repl string) string {
+ r := regexp.MustCompile(regex)
+ return r.ReplaceAllLiteralString(s, repl)
+}
+
+func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return "", err
+ }
+ return r.ReplaceAllLiteralString(s, repl), nil
+}
+
+func regexSplit(regex string, s string, n int) []string {
+ r := regexp.MustCompile(regex)
+ return r.Split(s, n)
+}
+
+func mustRegexSplit(regex string, s string, n int) ([]string, error) {
+ r, err := regexp.Compile(regex)
+ if err != nil {
+ return []string{}, err
+ }
+ return r.Split(s, n), nil
+}
+
+func regexQuoteMeta(s string) string {
+ return regexp.QuoteMeta(s)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go
new file mode 100644
index 000000000..3fbe08aa6
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/semver.go
@@ -0,0 +1,23 @@
+package sprig
+
+import (
+ sv2 "github.com/Masterminds/semver/v3"
+)
+
+func semverCompare(constraint, version string) (bool, error) {
+ c, err := sv2.NewConstraint(constraint)
+ if err != nil {
+ return false, err
+ }
+
+ v, err := sv2.NewVersion(version)
+ if err != nil {
+ return false, err
+ }
+
+ return c.Check(v), nil
+}
+
+func semver(version string) (*sv2.Version, error) {
+ return sv2.NewVersion(version)
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go b/vendor/github.com/Masterminds/sprig/v3/strings.go
new file mode 100644
index 000000000..e0ae628c8
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/strings.go
@@ -0,0 +1,236 @@
+package sprig
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+
+ util "github.com/Masterminds/goutils"
+)
+
+func base64encode(v string) string {
+ return base64.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base64decode(v string) string {
+ data, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func base32encode(v string) string {
+ return base32.StdEncoding.EncodeToString([]byte(v))
+}
+
+func base32decode(v string) string {
+ data, err := base32.StdEncoding.DecodeString(v)
+ if err != nil {
+ return err.Error()
+ }
+ return string(data)
+}
+
+func abbrev(width int, s string) string {
+ if width < 4 {
+ return s
+ }
+ r, _ := util.Abbreviate(s, width)
+ return r
+}
+
+func abbrevboth(left, right int, s string) string {
+ if right < 4 || left > 0 && right < 7 {
+ return s
+ }
+ r, _ := util.AbbreviateFull(s, left, right)
+ return r
+}
+func initials(s string) string {
+ // Wrap this just to eliminate the var args, which templates don't do well.
+ return util.Initials(s)
+}
+
+func randAlphaNumeric(count int) string {
+ // It is not possible, it appears, to actually generate an error here.
+ r, _ := util.CryptoRandomAlphaNumeric(count)
+ return r
+}
+
+func randAlpha(count int) string {
+ r, _ := util.CryptoRandomAlphabetic(count)
+ return r
+}
+
+func randAscii(count int) string {
+ r, _ := util.CryptoRandomAscii(count)
+ return r
+}
+
+func randNumeric(count int) string {
+ r, _ := util.CryptoRandomNumeric(count)
+ return r
+}
+
+func untitle(str string) string {
+ return util.Uncapitalize(str)
+}
+
+func quote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("%q", strval(s)))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func squote(str ...interface{}) string {
+ out := make([]string, 0, len(str))
+ for _, s := range str {
+ if s != nil {
+ out = append(out, fmt.Sprintf("'%v'", s))
+ }
+ }
+ return strings.Join(out, " ")
+}
+
+func cat(v ...interface{}) string {
+ v = removeNilElements(v)
+ r := strings.TrimSpace(strings.Repeat("%v ", len(v)))
+ return fmt.Sprintf(r, v...)
+}
+
+func indent(spaces int, v string) string {
+ pad := strings.Repeat(" ", spaces)
+ return pad + strings.Replace(v, "\n", "\n"+pad, -1)
+}
+
+func nindent(spaces int, v string) string {
+ return "\n" + indent(spaces, v)
+}
+
+func replace(old, new, src string) string {
+ return strings.Replace(src, old, new, -1)
+}
+
+func plural(one, many string, count int) string {
+ if count == 1 {
+ return one
+ }
+ return many
+}
+
+func strslice(v interface{}) []string {
+ switch v := v.(type) {
+ case []string:
+ return v
+ case []interface{}:
+ b := make([]string, 0, len(v))
+ for _, s := range v {
+ if s != nil {
+ b = append(b, strval(s))
+ }
+ }
+ return b
+ default:
+ val := reflect.ValueOf(v)
+ switch val.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := val.Len()
+ b := make([]string, 0, l)
+ for i := 0; i < l; i++ {
+ value := val.Index(i).Interface()
+ if value != nil {
+ b = append(b, strval(value))
+ }
+ }
+ return b
+ default:
+ if v == nil {
+ return []string{}
+ }
+
+ return []string{strval(v)}
+ }
+ }
+}
+
+func removeNilElements(v []interface{}) []interface{} {
+ newSlice := make([]interface{}, 0, len(v))
+ for _, i := range v {
+ if i != nil {
+ newSlice = append(newSlice, i)
+ }
+ }
+ return newSlice
+}
+
+func strval(v interface{}) string {
+ switch v := v.(type) {
+ case string:
+ return v
+ case []byte:
+ return string(v)
+ case error:
+ return v.Error()
+ case fmt.Stringer:
+ return v.String()
+ default:
+ return fmt.Sprintf("%v", v)
+ }
+}
+
+func trunc(c int, s string) string {
+ if c < 0 && len(s)+c > 0 {
+ return s[len(s)+c:]
+ }
+ if c >= 0 && len(s) > c {
+ return s[:c]
+ }
+ return s
+}
+
+func join(sep string, v interface{}) string {
+ return strings.Join(strslice(v), sep)
+}
+
+func split(sep, orig string) map[string]string {
+ parts := strings.Split(orig, sep)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+func splitn(sep string, n int, orig string) map[string]string {
+ parts := strings.SplitN(orig, sep, n)
+ res := make(map[string]string, len(parts))
+ for i, v := range parts {
+ res["_"+strconv.Itoa(i)] = v
+ }
+ return res
+}
+
+// substring creates a substring of the given string.
+//
+// If start is < 0, this calls string[:end].
+//
+// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:]
+//
+// Otherwise, this calls string[start, end].
+func substring(start, end int, s string) string {
+ if start < 0 {
+ return s[:end]
+ }
+ if end < 0 || end > len(s) {
+ return s[start:]
+ }
+ return s[start:end]
+}
diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go
new file mode 100644
index 000000000..b8e120e19
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/url.go
@@ -0,0 +1,66 @@
+package sprig
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+)
+
+func dictGetOrEmpty(dict map[string]interface{}, key string) string {
+ value, ok := dict[key]
+ if !ok {
+ return ""
+ }
+ tp := reflect.TypeOf(value).Kind()
+ if tp != reflect.String {
+ panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String()))
+ }
+ return reflect.ValueOf(value).String()
+}
+
+// parses given URL to return dict object
+func urlParse(v string) map[string]interface{} {
+ dict := map[string]interface{}{}
+ parsedURL, err := url.Parse(v)
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse url: %s", err))
+ }
+ dict["scheme"] = parsedURL.Scheme
+ dict["host"] = parsedURL.Host
+ dict["hostname"] = parsedURL.Hostname()
+ dict["path"] = parsedURL.Path
+ dict["query"] = parsedURL.RawQuery
+ dict["opaque"] = parsedURL.Opaque
+ dict["fragment"] = parsedURL.Fragment
+ if parsedURL.User != nil {
+ dict["userinfo"] = parsedURL.User.String()
+ } else {
+ dict["userinfo"] = ""
+ }
+
+ return dict
+}
+
+// join given dict to URL string
+func urlJoin(d map[string]interface{}) string {
+ resURL := url.URL{
+ Scheme: dictGetOrEmpty(d, "scheme"),
+ Host: dictGetOrEmpty(d, "host"),
+ Path: dictGetOrEmpty(d, "path"),
+ RawQuery: dictGetOrEmpty(d, "query"),
+ Opaque: dictGetOrEmpty(d, "opaque"),
+ Fragment: dictGetOrEmpty(d, "fragment"),
+ }
+ userinfo := dictGetOrEmpty(d, "userinfo")
+ var user *url.Userinfo
+ if userinfo != "" {
+ tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo))
+ if err != nil {
+ panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err))
+ }
+ user = tempURL.User
+ }
+
+ resURL.User = user
+ return resURL.String()
+}
diff --git a/vendor/github.com/flosch/pongo2/v6/.gitattributes b/vendor/github.com/flosch/pongo2/v6/.gitattributes
deleted file mode 100644
index fcadb2cf9..000000000
--- a/vendor/github.com/flosch/pongo2/v6/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-* text eol=lf
diff --git a/vendor/github.com/flosch/pongo2/v6/.replit b/vendor/github.com/flosch/pongo2/v6/.replit
deleted file mode 100644
index b312b179d..000000000
--- a/vendor/github.com/flosch/pongo2/v6/.replit
+++ /dev/null
@@ -1,2 +0,0 @@
-language = "go"
-run = ""
\ No newline at end of file
diff --git a/vendor/github.com/flosch/pongo2/v6/.travis.yml b/vendor/github.com/flosch/pongo2/v6/.travis.yml
deleted file mode 100644
index 5ea084579..000000000
--- a/vendor/github.com/flosch/pongo2/v6/.travis.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-language: go
-arch:
- - AMD64
- - ppc64le
-os:
- - linux
- - osx
-go:
- - 1.18
-script:
- - go test -v
diff --git a/vendor/github.com/flosch/pongo2/v6/AUTHORS b/vendor/github.com/flosch/pongo2/v6/AUTHORS
deleted file mode 100644
index 601697cfa..000000000
--- a/vendor/github.com/flosch/pongo2/v6/AUTHORS
+++ /dev/null
@@ -1,11 +0,0 @@
-Main author and maintainer of pongo2:
-
-* Florian Schlachter
-
-Contributors (in no specific order):
-
-* @romanoaugusto88
-* @vitalbh
-* @blaubaer
-
-Feel free to add yourself to the list or to modify your entry if you did a contribution.
diff --git a/vendor/github.com/flosch/pongo2/v6/LICENSE b/vendor/github.com/flosch/pongo2/v6/LICENSE
deleted file mode 100644
index a25460b39..000000000
--- a/vendor/github.com/flosch/pongo2/v6/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013-2022 Florian Schlachter
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/flosch/pongo2/v6/README.md b/vendor/github.com/flosch/pongo2/v6/README.md
deleted file mode 100644
index 68a519559..000000000
--- a/vendor/github.com/flosch/pongo2/v6/README.md
+++ /dev/null
@@ -1,168 +0,0 @@
-# [pongo](https://en.wikipedia.org/wiki/Pongo_%28genus%29)2
-
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/flosch/pongo2)](https://pkg.go.dev/github.com/flosch/pongo2)
-[![Build Status](https://travis-ci.org/flosch/pongo2.svg?branch=master)](https://travis-ci.org/flosch/pongo2)
-[![Run on Repl.it](https://repl.it/badge/github/flosch/pongo2)](https://repl.it/github/flosch/pongo2)
-
-pongo2 is a Django-syntax like templating-language ([official website](https://www.schlachter.tech/solutions/pongo2-template-engine/)).
-
-Install/update using `go get` (no dependencies required by pongo2):
-
-```sh
-go get -u github.com/flosch/pongo2/v6
-```
-
-Please use the [issue tracker](https://github.com/flosch/pongo2/issues) if you're encountering any problems with pongo2 or if you need help with implementing tags or filters ([create a ticket!](https://github.com/flosch/pongo2/issues/new)).
-
-## First impression of a template
-
-```django
-
-
- Our admins and users
-
- {# This is a short example to give you a quick overview of pongo2's syntax. #}
- {% macro user_details(user, is_admin=false) %}
-
-
-
= 40) || (user.karma > calc_avg_karma(userlist)+5) %} class="karma-good"{% endif %}>
-
-
- {{ user }}
-
-
-
-
This user registered {{ user.register_date|naturaltime }}.
-
-
-
The user's biography:
-
- {{ user.biography|markdown|truncatewords_html:15 }}
- read more
-
-
- {% if is_admin %}
-
This user is an admin!
- {% endif %}
-
- {% endmacro %}
-
-
-
-
- Our admins
- {% for admin in adminlist %} {{ user_details(admin, true) }} {% endfor %}
-
- Our members
- {% for user in userlist %} {{ user_details(user) }} {% endfor %}
-
-
-```
-
-## Features
-
-- Syntax- and feature-set-compatible with [Django 1.7](https://django.readthedocs.io/en/1.7.x/topics/templates.html)
-- [Advanced C-like expressions](https://github.com/flosch/pongo2/blob/master/template_tests/expressions.tpl).
-- [Complex function calls within expressions](https://github.com/flosch/pongo2/blob/master/template_tests/function_calls_wrapper.tpl).
-- [Easy API to create new filters and tags](http://godoc.org/github.com/flosch/pongo2#RegisterFilter) ([including parsing arguments](http://godoc.org/github.com/flosch/pongo2#Parser))
-- Additional features:
- - Macros including importing macros from other files (see [template_tests/macro.tpl](https://github.com/flosch/pongo2/blob/master/template_tests/macro.tpl))
- - [Template sandboxing](https://godoc.org/github.com/flosch/pongo2#TemplateSet) ([directory patterns](http://golang.org/pkg/path/filepath/#Match), banned tags/filters)
-
-## Caveats
-
-### Filters
-
-- **date** / **time**: The `date` and `time` filter are taking the Golang specific time- and date-format (not Django's one) currently. [Take a look on the format here](http://golang.org/pkg/time/#Time.Format).
-- **stringformat**: `stringformat` does **not** take Python's string format syntax as a parameter, instead it takes Go's. Essentially `{{ 3.14|stringformat:"pi is %.2f" }}` is `fmt.Sprintf("pi is %.2f", 3.14)`.
-- **escape** / **force_escape**: Unlike Django's behaviour, the `escape`-filter is applied immediately. Therefore there is no need for a `force_escape`-filter yet.
-
-### Tags
-
-- **for**: All the `forloop` fields (like `forloop.counter`) are written with a capital letter at the beginning. For example, the `counter` can be accessed by `forloop.Counter` and the parentloop by `forloop.Parentloop`.
-- **now**: takes Go's time format (see **date** and **time**-filter).
-
-### Misc
-
-- **not in-operator**: You can check whether a map/struct/string contains a key/field/substring by using the in-operator (or the negation of it):
- `{% if key in map %}Key is in map{% else %}Key not in map{% endif %}` or `{% if !(key in map) %}Key is NOT in map{% else %}Key is in map{% endif %}`.
-
-## Add-ons, libraries and helpers
-
-### Official
-
-- [pongo2-addons](https://github.com/flosch/pongo2-addons) - Official additional filters/tags for pongo2 (for example a **markdown**-filter). They are in their own repository because they're relying on 3rd-party-libraries.
-
-### 3rd-party
-
-- [beego-pongo2](https://github.com/oal/beego-pongo2) - A tiny little helper for using Pongo2 with [Beego](https://github.com/astaxie/beego).
-- [beego-pongo2.v2](https://github.com/ipfans/beego-pongo2.v2) - Same as `beego-pongo2`, but for pongo2 v2.
-- [macaron-pongo2](https://github.com/macaron-contrib/pongo2) - pongo2 support for [Macaron](https://github.com/Unknwon/macaron), a modular web framework.
-- [ginpongo2](https://github.com/ngerakines/ginpongo2) - middleware for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [Build'n support for Iris' template engine](https://github.com/kataras/iris)
-- [pongo2gin](https://gitlab.com/go-box/pongo2gin) - alternative renderer for [gin](github.com/gin-gonic/gin) to use pongo2 templates
-- [pongo2-trans](https://github.com/digitalcrab/pongo2trans) - `trans`-tag implementation for internationalization
-- [tpongo2](https://github.com/tango-contrib/tpongo2) - pongo2 support for [Tango](https://github.com/lunny/tango), a micro-kernel & pluggable web framework.
-- [p2cli](https://github.com/wrouesnel/p2cli) - command line templating utility based on pongo2
-- [pongorenderer](https://github.com/siredwin/pongorenderer) - minimal pongo2 renderer for [Echo](https://github.com/labstack/echo) web framework
-- [pongo2gcloud](https://github.com/dieselburner/pongo2gcloud) - Google Cloud Storage loader for pongo2 template files
-
-Please add your project to this list and send me a pull request when you've developed something nice for pongo2.
-
-## Who's using pongo2
-
-[I'm compiling a list of pongo2 users](https://github.com/flosch/pongo2/issues/241). Add your project or company!
-
-## API-usage examples
-
-Please see the documentation for a full list of provided API methods.
-
-### A tiny example (template string)
-
-```go
-// Compile the template first (i. e. creating the AST)
-tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-if err != nil {
- panic(err)
-}
-// Now you can render the template with the given
-// pongo2.Context how often you want to.
-out, err := tpl.Execute(pongo2.Context{"name": "florian"})
-if err != nil {
- panic(err)
-}
-fmt.Println(out) // Output: Hello Florian!
-```
-
-## Example server-usage (template file)
-
-```go
-package main
-
-import (
- "github.com/flosch/pongo2/v6"
- "net/http"
-)
-
-// Pre-compiling the templates at application startup using the
-// little Must()-helper function (Must() will panic if FromFile()
-// or FromString() will return with an error - that's it).
-// It's faster to pre-compile it anywhere at startup and only
-// execute the template later.
-var tplExample = pongo2.Must(pongo2.FromFile("example.html"))
-
-func examplePage(w http.ResponseWriter, r *http.Request) {
- // Execute the template per HTTP request
- err := tplExample.ExecuteWriter(pongo2.Context{"query": r.FormValue("query")}, w)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-
-func main() {
- http.HandleFunc("/", examplePage)
- http.ListenAndServe(":8080", nil)
-}
-```
diff --git a/vendor/github.com/flosch/pongo2/v6/context.go b/vendor/github.com/flosch/pongo2/v6/context.go
deleted file mode 100644
index 5b167322c..000000000
--- a/vendor/github.com/flosch/pongo2/v6/context.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package pongo2
-
-import (
- "errors"
- "fmt"
- "regexp"
-)
-
-var reIdentifiers = regexp.MustCompile("^[a-zA-Z0-9_]+$")
-
-var autoescape = true
-
-func SetAutoescape(newValue bool) {
- autoescape = newValue
-}
-
-// A Context type provides constants, variables, instances or functions to a template.
-//
-// pongo2 automatically provides meta-information or functions through the "pongo2"-key.
-// Currently, context["pongo2"] contains the following keys:
-// 1. version: returns the version string
-//
-// Template examples for accessing items from your context:
-// {{ myconstant }}
-// {{ myfunc("test", 42) }}
-// {{ user.name }}
-// {{ pongo2.version }}
-type Context map[string]any
-
-func (c Context) checkForValidIdentifiers() *Error {
- for k, v := range c {
- if !reIdentifiers.MatchString(k) {
- return &Error{
- Sender: "checkForValidIdentifiers",
- OrigError: fmt.Errorf("context-key '%s' (value: '%+v') is not a valid identifier", k, v),
- }
- }
- }
- return nil
-}
-
-// Update updates this context with the key/value-pairs from another context.
-func (c Context) Update(other Context) Context {
- for k, v := range other {
- c[k] = v
- }
- return c
-}
-
-// ExecutionContext contains all data important for the current rendering state.
-//
-// If you're writing a custom tag, your tag's Execute()-function will
-// have access to the ExecutionContext. This struct stores anything
-// about the current rendering process's Context including
-// the Context provided by the user (field Public).
-// You can safely use the Private context to provide data to the user's
-// template (like a 'forloop'-information). The Shared-context is used
-// to share data between tags. All ExecutionContexts share this context.
-//
-// Please be careful when accessing the Public data.
-// PLEASE DO NOT MODIFY THE PUBLIC CONTEXT (read-only).
-//
-// To create your own execution context within tags, use the
-// NewChildExecutionContext(parent) function.
-type ExecutionContext struct {
- template *Template
-
- Autoescape bool
- Public Context
- Private Context
- Shared Context
-}
-
-var pongo2MetaContext = Context{
- "version": Version,
-}
-
-func newExecutionContext(tpl *Template, ctx Context) *ExecutionContext {
- privateCtx := make(Context)
-
- // Make the pongo2-related funcs/vars available to the context
- privateCtx["pongo2"] = pongo2MetaContext
-
- return &ExecutionContext{
- template: tpl,
-
- Public: ctx,
- Private: privateCtx,
- Autoescape: autoescape,
- }
-}
-
-func NewChildExecutionContext(parent *ExecutionContext) *ExecutionContext {
- newctx := &ExecutionContext{
- template: parent.template,
-
- Public: parent.Public,
- Private: make(Context),
- Autoescape: parent.Autoescape,
- }
- newctx.Shared = parent.Shared
-
- // Copy all existing private items
- newctx.Private.Update(parent.Private)
-
- return newctx
-}
-
-func (ctx *ExecutionContext) Error(msg string, token *Token) *Error {
- return ctx.OrigError(errors.New(msg), token)
-}
-
-func (ctx *ExecutionContext) OrigError(err error, token *Token) *Error {
- filename := ctx.template.name
- var line, col int
- if token != nil {
- // No tokens available
- // TODO: Add location (from where?)
- filename = token.Filename
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: ctx.template,
- Filename: filename,
- Line: line,
- Column: col,
- Token: token,
- Sender: "execution",
- OrigError: err,
- }
-}
-
-func (ctx *ExecutionContext) Logf(format string, args ...any) {
- ctx.template.set.logf(format, args...)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/doc.go b/vendor/github.com/flosch/pongo2/v6/doc.go
deleted file mode 100644
index c3c277f5e..000000000
--- a/vendor/github.com/flosch/pongo2/v6/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Package pongo2 is a Django-syntax like template-engine
-//
-// More info about pongo2: https://www.schlachter.tech/pongo2
-//
-// Complete documentation on the template language:
-// https://docs.djangoproject.com/en/dev/topics/templates/
-//
-// Make sure to read README.md in the repository as well.
-//
-// A tiny example with template strings:
-//
-//
-// // Compile the template first (i. e. creating the AST)
-// tpl, err := pongo2.FromString("Hello {{ name|capfirst }}!")
-// if err != nil {
-// panic(err)
-// }
-// // Now you can render the template with the given
-// // pongo2.Context how often you want to.
-// out, err := tpl.Execute(pongo2.Context{"name": "fred"})
-// if err != nil {
-// panic(err)
-// }
-// fmt.Println(out) // Output: Hello Fred!
-//
-package pongo2
diff --git a/vendor/github.com/flosch/pongo2/v6/error.go b/vendor/github.com/flosch/pongo2/v6/error.go
deleted file mode 100644
index 8aec8c100..000000000
--- a/vendor/github.com/flosch/pongo2/v6/error.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package pongo2
-
-import (
- "bufio"
- "fmt"
- "os"
-)
-
-// The Error type is being used to address an error during lexing, parsing or
-// execution. If you want to return an error object (for example in your own
-// tag or filter) fill this object with as much information as you have.
-// Make sure "Sender" is always given (if you're returning an error within
-// a filter, make Sender equals 'filter:yourfilter'; same goes for tags: 'tag:mytag').
-// It's okay if you only fill in ErrorMsg if you don't have any other details at hand.
-type Error struct {
- Template *Template
- Filename string
- Line int
- Column int
- Token *Token
- Sender string
- OrigError error
-}
-
-func (e *Error) updateFromTokenIfNeeded(template *Template, t *Token) *Error {
- if e.Template == nil {
- e.Template = template
- }
-
- if e.Token == nil {
- e.Token = t
- if e.Line <= 0 {
- e.Line = t.Line
- e.Column = t.Col
- }
- }
-
- return e
-}
-
-// Returns a nice formatted error string.
-func (e *Error) Error() string {
- s := "[Error"
- if e.Sender != "" {
- s += " (where: " + e.Sender + ")"
- }
- if e.Filename != "" {
- s += " in " + e.Filename
- }
- if e.Line > 0 {
- s += fmt.Sprintf(" | Line %d Col %d", e.Line, e.Column)
- if e.Token != nil {
- s += fmt.Sprintf(" near '%s'", e.Token.Val)
- }
- }
- s += "] "
- s += e.OrigError.Error()
- return s
-}
-
-// RawLine returns the affected line from the original template, if available.
-func (e *Error) RawLine() (line string, available bool, outErr error) {
- if e.Line <= 0 || e.Filename == "" {
- return "", false, nil
- }
-
- filename := e.Filename
- if e.Template != nil {
- filename = e.Template.set.resolveFilename(e.Template, e.Filename)
- }
- file, err := os.Open(filename)
- if err != nil {
- return "", false, err
- }
- defer func() {
- err := file.Close()
- if err != nil && outErr == nil {
- outErr = err
- }
- }()
-
- scanner := bufio.NewScanner(file)
- l := 0
- for scanner.Scan() {
- l++
- if l == e.Line {
- return scanner.Text(), true, nil
- }
- }
- return "", false, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/filters.go b/vendor/github.com/flosch/pongo2/v6/filters.go
deleted file mode 100644
index cfee7cc2e..000000000
--- a/vendor/github.com/flosch/pongo2/v6/filters.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-// FilterFunction is the type filter functions must fulfil
-type FilterFunction func(in *Value, param *Value) (out *Value, err *Error)
-
-var filters map[string]FilterFunction
-
-func init() {
- filters = make(map[string]FilterFunction)
-}
-
-// FilterExists returns true if the given filter is already registered
-func FilterExists(name string) bool {
- _, existing := filters[name]
- return existing
-}
-
-// RegisterFilter registers a new filter. If there's already a filter with the same. You usually
-// want to call this function in the filter's init() function:
-// http://golang.org/doc/effective_go.html#init
-func RegisterFilter(name string, fn FilterFunction) error {
- if FilterExists(name) {
- return fmt.Errorf("filter with name '%s' is already registered", name)
- }
- filters[name] = fn
- return nil
-}
-
-// ReplaceFilter replaces an already registered filter with a new implementation. Use this
-// function with caution since it allows you to change existing filter behaviour.
-func ReplaceFilter(name string, fn FilterFunction) error {
- if !FilterExists(name) {
- return fmt.Errorf("filter with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- filters[name] = fn
- return nil
-}
-
-// MustApplyFilter behaves like ApplyFilter, but panics on an error.
-func MustApplyFilter(name string, value *Value, param *Value) *Value {
- val, err := ApplyFilter(name, value, param)
- if err != nil {
- panic(err)
- }
- return val
-}
-
-// ApplyFilter applies a filter to a given value using the given parameters.
-// Returns a *pongo2.Value or an error.
-func ApplyFilter(name string, value *Value, param *Value) (*Value, *Error) {
- fn, existing := filters[name]
- if !existing {
- return nil, &Error{
- Sender: "applyfilter",
- OrigError: fmt.Errorf("filter with name '%s' not found", name),
- }
- }
-
- // Make sure param is a *Value
- if param == nil {
- param = AsValue(nil)
- }
-
- return fn(value, param)
-}
-
-type filterCall struct {
- token *Token
-
- name string
- parameter IEvaluator
-
- filterFunc FilterFunction
-}
-
-func (fc *filterCall) Execute(v *Value, ctx *ExecutionContext) (*Value, *Error) {
- var param *Value
- var err *Error
-
- if fc.parameter != nil {
- param, err = fc.parameter.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- } else {
- param = AsValue(nil)
- }
-
- filteredValue, err := fc.filterFunc(v, param)
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(ctx.template, fc.token)
- }
- return filteredValue, nil
-}
-
-// Filter = IDENT | IDENT ":" FilterArg | IDENT "|" Filter
-func (p *Parser) parseFilter() (*filterCall, *Error) {
- identToken := p.MatchType(TokenIdentifier)
-
- // Check filter ident
- if identToken == nil {
- return nil, p.Error("Filter name must be an identifier.", nil)
- }
-
- filter := &filterCall{
- token: identToken,
- name: identToken.Val,
- }
-
- // Get the appropriate filter function and bind it
- filterFn, exists := filters[identToken.Val]
- if !exists {
- return nil, p.Error(fmt.Sprintf("Filter '%s' does not exist.", identToken.Val), identToken)
- }
-
- filter.filterFunc = filterFn
-
- // Check for filter-argument (2 tokens needed: ':' ARG)
- if p.Match(TokenSymbol, ":") != nil {
- if p.Peek(TokenSymbol, "}}") != nil {
- return nil, p.Error("Filter parameter required after ':'.", nil)
- }
-
- // Get filter argument expression
- v, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filter.parameter = v
- }
-
- return filter, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/filters_builtin.go b/vendor/github.com/flosch/pongo2/v6/filters_builtin.go
deleted file mode 100644
index 81ec5a350..000000000
--- a/vendor/github.com/flosch/pongo2/v6/filters_builtin.go
+++ /dev/null
@@ -1,955 +0,0 @@
-package pongo2
-
-/* Filters that are provided through github.com/flosch/pongo2-addons:
- ------------------------------------------------------------------
-
- filesizeformat
- slugify
- timesince
- timeuntil
-
- Filters that won't be added:
- ----------------------------
-
- get_static_prefix (reason: web-framework specific)
- pprint (reason: python-specific)
- static (reason: web-framework specific)
-
- Reconsideration (not implemented yet):
- --------------------------------------
-
- force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
- safeseq (reason: same reason as `force_escape`)
- unordered_list (python-specific; not sure whether needed or not)
- dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
- dictsortreversed (see dictsort)
-*/
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math/rand"
- "net/url"
- "regexp"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-)
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterFilter("escape", filterEscape)
- RegisterFilter("e", filterEscape) // alias of `escape`
- RegisterFilter("safe", filterSafe)
- RegisterFilter("escapejs", filterEscapejs)
-
- RegisterFilter("add", filterAdd)
- RegisterFilter("addslashes", filterAddslashes)
- RegisterFilter("capfirst", filterCapfirst)
- RegisterFilter("center", filterCenter)
- RegisterFilter("cut", filterCut)
- RegisterFilter("date", filterDate)
- RegisterFilter("default", filterDefault)
- RegisterFilter("default_if_none", filterDefaultIfNone)
- RegisterFilter("divisibleby", filterDivisibleby)
- RegisterFilter("first", filterFirst)
- RegisterFilter("floatformat", filterFloatformat)
- RegisterFilter("get_digit", filterGetdigit)
- RegisterFilter("iriencode", filterIriencode)
- RegisterFilter("join", filterJoin)
- RegisterFilter("last", filterLast)
- RegisterFilter("length", filterLength)
- RegisterFilter("length_is", filterLengthis)
- RegisterFilter("linebreaks", filterLinebreaks)
- RegisterFilter("linebreaksbr", filterLinebreaksbr)
- RegisterFilter("linenumbers", filterLinenumbers)
- RegisterFilter("ljust", filterLjust)
- RegisterFilter("lower", filterLower)
- RegisterFilter("make_list", filterMakelist)
- RegisterFilter("phone2numeric", filterPhone2numeric)
- RegisterFilter("pluralize", filterPluralize)
- RegisterFilter("random", filterRandom)
- RegisterFilter("removetags", filterRemovetags)
- RegisterFilter("rjust", filterRjust)
- RegisterFilter("slice", filterSlice)
- RegisterFilter("split", filterSplit)
- RegisterFilter("stringformat", filterStringformat)
- RegisterFilter("striptags", filterStriptags)
- RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
- RegisterFilter("title", filterTitle)
- RegisterFilter("truncatechars", filterTruncatechars)
- RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
- RegisterFilter("truncatewords", filterTruncatewords)
- RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
- RegisterFilter("upper", filterUpper)
- RegisterFilter("urlencode", filterUrlencode)
- RegisterFilter("urlize", filterUrlize)
- RegisterFilter("urlizetrunc", filterUrlizetrunc)
- RegisterFilter("wordcount", filterWordcount)
- RegisterFilter("wordwrap", filterWordwrap)
- RegisterFilter("yesno", filterYesno)
-
- RegisterFilter("float", filterFloat) // pongo-specific
- RegisterFilter("integer", filterInteger) // pongo-specific
-}
-
-func filterTruncatecharsHelper(s string, newLen int) string {
- runes := []rune(s)
- if newLen < len(runes) {
- if newLen >= 3 {
- return fmt.Sprintf("%s...", string(runes[:newLen-3]))
- }
- // Not enough space for the ellipsis
- return string(runes[:newLen])
- }
- return string(runes)
-}
-
-func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
- vLen := len(value)
- var tagStack []string
- idx := 0
-
- for idx < vLen && !cond() {
- c, s := utf8.DecodeRuneInString(value[idx:])
- if c == utf8.RuneError {
- idx += s
- continue
- }
-
- if c == '<' {
- newOutput.WriteRune(c)
- idx += s // consume "<"
-
- if idx+1 < vLen {
- if value[idx] == '/' {
- // Close tag
-
- newOutput.WriteString("/")
-
- tag := ""
- idx++ // consume "/"
-
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
- tag += string(c2)
- idx += size2
- }
-
- if len(tagStack) > 0 {
- // Ideally, the close tag is TOP of tag stack
- // In malformed HTML, it must not be, so iterate through the stack and remove the tag
- for i := len(tagStack) - 1; i >= 0; i-- {
- if tagStack[i] == tag {
- // Found the tag
- tagStack[i] = tagStack[len(tagStack)-1]
- tagStack = tagStack[:len(tagStack)-1]
- break
- }
- }
- }
-
- newOutput.WriteString(tag)
- newOutput.WriteString(">")
- } else {
- // Open tag
-
- tag := ""
-
- params := false
- for idx < vLen {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- newOutput.WriteRune(c2)
-
- // End of tag found
- if c2 == '>' {
- idx++ // consume ">"
- break
- }
-
- if !params {
- if c2 == ' ' {
- params = true
- } else {
- tag += string(c2)
- }
- }
-
- idx += size2
- }
-
- // Add tag to stack
- tagStack = append(tagStack, tag)
- }
- }
- } else {
- idx = fn(c, s, idx)
- }
- }
-
- finalize()
-
- for i := len(tagStack) - 1; i >= 0; i-- {
- tag := tagStack[i]
- // Close everything from the regular tag stack
- newOutput.WriteString(fmt.Sprintf("%s>", tag))
- }
-}
-
-func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- newLen := param.Integer()
- return AsValue(filterTruncatecharsHelper(s, newLen)), nil
-}
-
-func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer()-3, 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- textcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return textcounter >= newLen
- }, func(c rune, s int, idx int) int {
- textcounter++
- newOutput.WriteRune(c)
-
- return idx + s
- }, func() {
- if textcounter >= newLen && textcounter < len(value) {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- n := param.Integer()
- if n <= 0 {
- return AsValue(""), nil
- }
- nlen := min(len(words), n)
- out := make([]string, 0, nlen)
- for i := 0; i < nlen; i++ {
- out = append(out, words[i])
- }
-
- if n < len(words) {
- out = append(out, "...")
- }
-
- return AsValue(strings.Join(out, " ")), nil
-}
-
-func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
- value := in.String()
- newLen := max(param.Integer(), 0)
-
- newOutput := bytes.NewBuffer(nil)
-
- wordcounter := 0
-
- filterTruncateHTMLHelper(value, newOutput, func() bool {
- return wordcounter >= newLen
- }, func(_ rune, _ int, idx int) int {
- // Get next word
- wordFound := false
-
- for idx < len(value) {
- c2, size2 := utf8.DecodeRuneInString(value[idx:])
- if c2 == utf8.RuneError {
- idx += size2
- continue
- }
-
- if c2 == '<' {
- // HTML tag start, don't consume it
- return idx
- }
-
- newOutput.WriteRune(c2)
- idx += size2
-
- if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
- // Word ends here, stop capturing it now
- break
- } else {
- wordFound = true
- }
- }
-
- if wordFound {
- wordcounter++
- }
-
- return idx
- }, func() {
- if wordcounter >= newLen {
- newOutput.WriteString("...")
- }
- })
-
- return AsSafeValue(newOutput.String()), nil
-}
-
-func filterEscape(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "&", "&", -1)
- output = strings.Replace(output, ">", ">", -1)
- output = strings.Replace(output, "<", "<", -1)
- output = strings.Replace(output, "\"", """, -1)
- output = strings.Replace(output, "'", "'", -1)
- return AsValue(output), nil
-}
-
-func filterSafe(in *Value, param *Value) (*Value, *Error) {
- return in, nil // nothing to do here, just to keep track of the safe application
-}
-
-func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
-
- var b bytes.Buffer
-
- idx := 0
- for idx < len(sin) {
- c, size := utf8.DecodeRuneInString(sin[idx:])
- if c == utf8.RuneError {
- idx += size
- continue
- }
-
- if c == '\\' {
- // Escape seq?
- if idx+1 < len(sin) {
- switch sin[idx+1] {
- case 'r':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
- idx += 2
- continue
- case 'n':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
- idx += 2
- continue
- /*case '\'':
- b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
- idx += 2
- continue
- case '"':
- b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
- idx += 2
- continue*/
- }
- }
- }
-
- if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
- b.WriteRune(c)
- } else {
- b.WriteString(fmt.Sprintf(`\u%04X`, c))
- }
-
- idx += size
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterAdd(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() && param.IsNumber() {
- if in.IsFloat() || param.IsFloat() {
- return AsValue(in.Float() + param.Float()), nil
- }
- return AsValue(in.Integer() + param.Integer()), nil
- }
- // If in/param is not a number, we're relying on the
- // Value's String() conversion and just add them both together
- return AsValue(in.String() + param.String()), nil
-}
-
-func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
- output := strings.Replace(in.String(), "\\", "\\\\", -1)
- output = strings.Replace(output, "\"", "\\\"", -1)
- output = strings.Replace(output, "'", "\\'", -1)
- return AsValue(output), nil
-}
-
-func filterCut(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
-}
-
-func filterLength(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len()), nil
-}
-
-func filterLengthis(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Len() == param.Integer()), nil
-}
-
-func filterDefault(in *Value, param *Value) (*Value, *Error) {
- if !in.IsTrue() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
- if in.IsNil() {
- return param, nil
- }
- return in, nil
-}
-
-func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
- if param.Integer() == 0 {
- return AsValue(false), nil
- }
- return AsValue(in.Integer()%param.Integer() == 0), nil
-}
-
-func filterFirst(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(0), nil
- }
- return AsValue(""), nil
-}
-
-func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
- val := in.Float()
-
- decimals := -1
- if !param.IsNil() {
- // Any argument provided?
- decimals = param.Integer()
- }
-
- // if the argument is not a number (e. g. empty), the default
- // behaviour is trim the result
- trim := !param.IsNumber()
-
- if decimals <= 0 {
- // argument is negative or zero, so we
- // want the output being trimmed
- decimals = -decimals
- trim = true
- }
-
- if trim {
- // Remove zeroes
- if float64(int(val)) == val {
- return AsValue(in.Integer()), nil
- }
- }
-
- return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
-}
-
-func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
- i := param.Integer()
- l := len(in.String()) // do NOT use in.Len() here!
- if i <= 0 || i > l {
- return in, nil
- }
- return AsValue(in.String()[l-i] - 48), nil
-}
-
-const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
-
-func filterIriencode(in *Value, param *Value) (*Value, *Error) {
- var b bytes.Buffer
-
- sin := in.String()
- for _, r := range sin {
- if strings.ContainsRune(filterIRIChars, r) {
- b.WriteRune(r)
- } else {
- b.WriteString(url.QueryEscape(string(r)))
- }
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterJoin(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() {
- return in, nil
- }
- sep := param.String()
- sl := make([]string, 0, in.Len())
- for i := 0; i < in.Len(); i++ {
- sl = append(sl, in.Index(i).String())
- }
- return AsValue(strings.Join(sl, sep)), nil
-}
-
-func filterLast(in *Value, param *Value) (*Value, *Error) {
- if in.CanSlice() && in.Len() > 0 {
- return in.Index(in.Len() - 1), nil
- }
- return AsValue(""), nil
-}
-
-func filterUpper(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToUpper(in.String())), nil
-}
-
-func filterLower(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.ToLower(in.String())), nil
-}
-
-func filterMakelist(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- result := make([]string, 0, len(s))
- for _, c := range s {
- result = append(result, string(c))
- }
- return AsValue(result), nil
-}
-
-func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
- if in.Len() <= 0 {
- return AsValue(""), nil
- }
- t := in.String()
- r, size := utf8.DecodeRuneInString(t)
- return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
-}
-
-func filterCenter(in *Value, param *Value) (*Value, *Error) {
- width := param.Integer()
- slen := in.Len()
- if width <= slen {
- return in, nil
- }
-
- spaces := width - slen
- left := spaces/2 + spaces%2
- right := spaces / 2
-
- return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
- in.String(), strings.Repeat(" ", right))), nil
-}
-
-func filterDate(in *Value, param *Value) (*Value, *Error) {
- t, isTime := in.Interface().(time.Time)
- if !isTime {
- return nil, &Error{
- Sender: "filter:date",
- OrigError: errors.New("filter input argument must be of type 'time.Time'"),
- }
- }
- return AsValue(t.Format(param.String())), nil
-}
-
-func filterFloat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Float()), nil
-}
-
-func filterInteger(in *Value, param *Value) (*Value, *Error) {
- return AsValue(in.Integer()), nil
-}
-
-func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
- if in.Len() == 0 {
- return in, nil
- }
-
- var b bytes.Buffer
-
- // Newline =
- // Double newline = ...
- lines := strings.Split(in.String(), "\n")
- lenlines := len(lines)
-
- opened := false
-
- for idx, line := range lines {
-
- if !opened {
- b.WriteString("")
- opened = true
- }
-
- b.WriteString(line)
-
- if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
- // We've not reached the end
- if strings.TrimSpace(lines[idx+1]) == "" {
- // Next line is empty
- if opened {
- b.WriteString("
")
- opened = false
- }
- } else {
- b.WriteString("
")
- }
- }
- }
-
- if opened {
- b.WriteString("
")
- }
-
- return AsValue(b.String()), nil
-}
-
-func filterSplit(in *Value, param *Value) (*Value, *Error) {
- chunks := strings.Split(in.String(), param.String())
-
- return AsValue(chunks), nil
-}
-
-func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
- return AsValue(strings.Replace(in.String(), "\n", "
", -1)), nil
-}
-
-func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
- lines := strings.Split(in.String(), "\n")
- output := make([]string, 0, len(lines))
- for idx, line := range lines {
- output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
- }
- return AsValue(strings.Join(output, "\n")), nil
-}
-
-func filterLjust(in *Value, param *Value) (*Value, *Error) {
- times := param.Integer() - in.Len()
- if times < 0 {
- times = 0
- }
- return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
-}
-
-func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
- return AsValue(url.QueryEscape(in.String())), nil
-}
-
-// TODO: This regexp could do some work
-var (
- filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
- filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
-)
-
-func filterUrlizeHelper(input string, autoescape bool, trunc int) (string, error) {
- var soutErr error
- sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
- var prefix string
- var suffix string
- if strings.HasPrefix(raw_url, " ") {
- prefix = " "
- }
- if strings.HasSuffix(raw_url, " ") {
- suffix = " "
- }
-
- raw_url = strings.TrimSpace(raw_url)
-
- t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- url := t.String()
-
- if !strings.HasPrefix(url, "http") {
- url = fmt.Sprintf("http://%s", url)
- }
-
- title := raw_url
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- if autoescape {
- t, err := ApplyFilter("escape", AsValue(title), nil)
- if err != nil {
- soutErr = err
- return ""
- }
- title = t.String()
- }
-
- return fmt.Sprintf(`%s%s%s`, prefix, url, title, suffix)
- })
- if soutErr != nil {
- return "", soutErr
- }
-
- sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
- title := mail
-
- if trunc > 3 && len(title) > trunc {
- title = fmt.Sprintf("%s...", title[:trunc-3])
- }
-
- return fmt.Sprintf(`%s`, mail, title)
- })
-
- return sout, nil
-}
-
-func filterUrlize(in *Value, param *Value) (*Value, *Error) {
- autoescape := true
- if param.IsBool() {
- autoescape = param.Bool()
- }
-
- s, err := filterUrlizeHelper(in.String(), autoescape, -1)
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlize",
- OrigError: err,
- }
- }
-
- return AsValue(s), nil
-}
-
-func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
- s, err := filterUrlizeHelper(in.String(), true, param.Integer())
- if err != nil {
- return nil, &Error{
- Sender: "filter:urlizetrunc",
- OrigError: err,
- }
- }
- return AsValue(s), nil
-}
-
-func filterStringformat(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
-}
-
-var reStriptags = regexp.MustCompile("<[^>]*?>")
-
-func filterStriptags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
-
- // Strip all tags
- s = reStriptags.ReplaceAllString(s, "")
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-// https://en.wikipedia.org/wiki/Phoneword
-var filterPhone2numericMap = map[string]string{
- "a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
- "l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
- "w": "9", "x": "9", "y": "9", "z": "9",
-}
-
-func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
- sin := in.String()
- for k, v := range filterPhone2numericMap {
- sin = strings.Replace(sin, k, v, -1)
- sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
- }
- return AsValue(sin), nil
-}
-
-func filterPluralize(in *Value, param *Value) (*Value, *Error) {
- if in.IsNumber() {
- // Works only on numbers
- if param.Len() > 0 {
- endings := strings.Split(param.String(), ",")
- if len(endings) > 2 {
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("you cannot pass more than 2 arguments to filter 'pluralize'"),
- }
- }
- if len(endings) == 1 {
- // 1 argument
- if in.Integer() != 1 {
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // 2 arguments
- return AsValue(endings[1]), nil
- }
- return AsValue(endings[0]), nil
- }
- } else {
- if in.Integer() != 1 {
- // return default 's'
- return AsValue("s"), nil
- }
- }
-
- return AsValue(""), nil
- }
- return nil, &Error{
- Sender: "filter:pluralize",
- OrigError: errors.New("filter 'pluralize' does only work on numbers"),
- }
-}
-
-func filterRandom(in *Value, param *Value) (*Value, *Error) {
- if !in.CanSlice() || in.Len() <= 0 {
- return in, nil
- }
- i := rand.Intn(in.Len())
- return in.Index(i), nil
-}
-
-func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
- s := in.String()
- tags := strings.Split(param.String(), ",")
-
- // Strip only specific tags
- for _, tag := range tags {
- re := regexp.MustCompile(fmt.Sprintf("?%s/?>", tag))
- s = re.ReplaceAllString(s, "")
- }
-
- return AsValue(strings.TrimSpace(s)), nil
-}
-
-func filterRjust(in *Value, param *Value) (*Value, *Error) {
- return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
-}
-
-func filterSlice(in *Value, param *Value) (*Value, *Error) {
- comp := strings.Split(param.String(), ":")
- if len(comp) != 2 {
- return nil, &Error{
- Sender: "filter:slice",
- OrigError: errors.New("Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]"),
- }
- }
-
- if !in.CanSlice() {
- return in, nil
- }
-
- // start with [x:len]
- from := AsValue(comp[0]).Integer()
- to := in.Len()
-
- // handle negative x
- if from < 0 {
- from = max(in.Len()+from, 0)
- }
-
- // handle x over bounds
- if from > to {
- from = to
- }
-
- vto := AsValue(comp[1]).Integer()
- // handle missing y
- if strings.TrimSpace(comp[1]) == "" {
- vto = in.Len()
- }
-
- // handle negative y
- if vto < 0 {
- vto = max(in.Len()+vto, 0)
- }
-
- // handle y < x
- if vto < from {
- vto = from
- }
-
- // y is within bounds, return the [x, y] slice
- if vto >= from && vto <= in.Len() {
- to = vto
- } // otherwise, the slice remains [x, len]
-
- return in.Slice(from, to), nil
-}
-
-func filterTitle(in *Value, param *Value) (*Value, *Error) {
- if !in.IsString() {
- return AsValue(""), nil
- }
- return AsValue(strings.Title(strings.ToLower(in.String()))), nil
-}
-
-func filterWordcount(in *Value, param *Value) (*Value, *Error) {
- return AsValue(len(strings.Fields(in.String()))), nil
-}
-
-func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
- words := strings.Fields(in.String())
- wordsLen := len(words)
- wrapAt := param.Integer()
- if wrapAt <= 0 {
- return in, nil
- }
-
- linecount := wordsLen/wrapAt + wordsLen%wrapAt
- lines := make([]string, 0, linecount)
- for i := 0; i < linecount; i++ {
- lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
- }
- return AsValue(strings.Join(lines, "\n")), nil
-}
-
-func filterYesno(in *Value, param *Value) (*Value, *Error) {
- choices := map[int]string{
- 0: "yes",
- 1: "no",
- 2: "maybe",
- }
- paramString := param.String()
- customChoices := strings.Split(paramString, ",")
- if len(paramString) > 0 {
- if len(customChoices) > 3 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("you cannot pass more than 3 options to the 'yesno'-filter (got: '%s')", paramString),
- }
- }
- if len(customChoices) < 2 {
- return nil, &Error{
- Sender: "filter:yesno",
- OrigError: fmt.Errorf("you must either pass no or at least 2 arguments to the 'yesno'-filter (got: '%s')", paramString),
- }
- }
-
- // Map to the options now
- choices[0] = customChoices[0]
- choices[1] = customChoices[1]
- if len(customChoices) == 3 {
- choices[2] = customChoices[2]
- }
- }
-
- // maybe
- if in.IsNil() {
- return AsValue(choices[2]), nil
- }
-
- // yes
- if in.IsTrue() {
- return AsValue(choices[0]), nil
- }
-
- // no
- return AsValue(choices[1]), nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/helpers.go b/vendor/github.com/flosch/pongo2/v6/helpers.go
deleted file mode 100644
index 880dbc044..000000000
--- a/vendor/github.com/flosch/pongo2/v6/helpers.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package pongo2
-
-func max(a, b int) int {
- if a > b {
- return a
- }
- return b
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/lexer.go b/vendor/github.com/flosch/pongo2/v6/lexer.go
deleted file mode 100644
index 4040c567a..000000000
--- a/vendor/github.com/flosch/pongo2/v6/lexer.go
+++ /dev/null
@@ -1,438 +0,0 @@
-package pongo2
-
-import (
- "errors"
- "fmt"
- "strings"
- "unicode/utf8"
-)
-
-const (
- TokenError = iota
- EOF
-
- TokenHTML
-
- TokenKeyword
- TokenIdentifier
- TokenString
- TokenNumber
- TokenSymbol
- TokenNil
-)
-
-var (
- tokenSpaceChars = " \n\r\t"
- tokenIdentifierChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_"
- tokenIdentifierCharsWithDigits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789"
- tokenDigits = "0123456789"
-
- // Available symbols in pongo2 (within filters/tag)
- TokenSymbols = []string{
- // 3-Char symbols
- "{{-", "-}}", "{%-", "-%}",
-
- // 2-Char symbols
- "==", ">=", "<=", "&&", "||", "{{", "}}", "{%", "%}", "!=", "<>",
-
- // 1-Char symbol
- "(", ")", "+", "-", "*", "<", ">", "/", "^", ",", ".", "!", "|", ":", "=", "%", "[", "]",
- }
-
- // Available keywords in pongo2
- TokenKeywords = []string{"in", "and", "or", "not", "true", "false", "as", "export"}
-)
-
-type (
- TokenType int
- Token struct {
- Filename string
- Typ TokenType
- Val string
- Line int
- Col int
- TrimWhitespaces bool
- }
-)
-
-type (
- lexerStateFn func() lexerStateFn
- lexer struct {
- name string
- input string
- start int // start pos of the item
- pos int // current pos
- width int // width of last rune
- tokens []*Token
- errored bool
- startline int
- startcol int
- line int
- col int
-
- inVerbatim bool
- verbatimName string
- }
-)
-
-func (t *Token) String() string {
- val := t.Val
- if len(val) > 1000 {
- val = fmt.Sprintf("%s...%s", val[:10], val[len(val)-5:])
- }
-
- typ := ""
- switch t.Typ {
- case TokenHTML:
- typ = "HTML"
- case TokenError:
- typ = "Error"
- case TokenIdentifier:
- typ = "Identifier"
- case TokenKeyword:
- typ = "Keyword"
- case TokenNumber:
- typ = "Number"
- case TokenString:
- typ = "String"
- case TokenSymbol:
- typ = "Symbol"
- case TokenNil:
- typ = "Nil"
- default:
- typ = "Unknown"
- }
-
- return fmt.Sprintf("",
- typ, t.Typ, val, t.Line, t.Col, t.TrimWhitespaces)
-}
-
-func lex(name string, input string) ([]*Token, *Error) {
- l := &lexer{
- name: name,
- input: input,
- tokens: make([]*Token, 0, 100),
- line: 1,
- col: 1,
- startline: 1,
- startcol: 1,
- }
- l.run()
- if l.errored {
- errtoken := l.tokens[len(l.tokens)-1]
- return nil, &Error{
- Filename: name,
- Line: errtoken.Line,
- Column: errtoken.Col,
- Sender: "lexer",
- OrigError: errors.New(errtoken.Val),
- }
- }
- return l.tokens, nil
-}
-
-func (l *lexer) value() string {
- return l.input[l.start:l.pos]
-}
-
-func (l *lexer) length() int {
- return l.pos - l.start
-}
-
-func (l *lexer) emit(t TokenType) {
- tok := &Token{
- Filename: l.name,
- Typ: t,
- Val: l.value(),
- Line: l.startline,
- Col: l.startcol,
- }
-
- if t == TokenString {
- // Escape sequence \" in strings
- tok.Val = strings.Replace(tok.Val, `\"`, `"`, -1)
- tok.Val = strings.Replace(tok.Val, `\\`, `\`, -1)
- }
-
- if t == TokenSymbol && len(tok.Val) == 3 && (strings.HasSuffix(tok.Val, "-") || strings.HasPrefix(tok.Val, "-")) {
- tok.TrimWhitespaces = true
- tok.Val = strings.Replace(tok.Val, "-", "", -1)
- }
-
- l.tokens = append(l.tokens, tok)
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) next() rune {
- if l.pos >= len(l.input) {
- l.width = 0
- return EOF
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = w
- l.pos += l.width
- l.col += l.width
- return r
-}
-
-func (l *lexer) backup() {
- l.pos -= l.width
- l.col -= l.width
-}
-
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func (l *lexer) ignore() {
- l.start = l.pos
- l.startline = l.line
- l.startcol = l.col
-}
-
-func (l *lexer) accept(what string) bool {
- if strings.ContainsRune(what, l.next()) {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *lexer) acceptRun(what string) {
- for strings.ContainsRune(what, l.next()) {
- }
- l.backup()
-}
-
-func (l *lexer) errorf(format string, args ...any) lexerStateFn {
- t := &Token{
- Filename: l.name,
- Typ: TokenError,
- Val: fmt.Sprintf(format, args...),
- Line: l.startline,
- Col: l.startcol,
- }
- l.tokens = append(l.tokens, t)
- l.errored = true
- l.startline = l.line
- l.startcol = l.col
- return nil
-}
-
-func (l *lexer) run() {
- for {
- // TODO: Support verbatim tag names
- // https://docs.djangoproject.com/en/dev/ref/templates/builtins/#verbatim
- if l.inVerbatim {
- name := l.verbatimName
- if name != "" {
- name += " "
- }
- if strings.HasPrefix(l.input[l.pos:], fmt.Sprintf("{%% endverbatim %s%%}", name)) { // end verbatim
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- w := len("{% endverbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- l.inVerbatim = false
- }
- } else if strings.HasPrefix(l.input[l.pos:], "{% verbatim %}") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.inVerbatim = true
- w := len("{% verbatim %}")
- l.pos += w
- l.col += w
- l.ignore()
- }
-
- if !l.inVerbatim {
- // Ignore single-line comments {# ... #}
- if strings.HasPrefix(l.input[l.pos:], "{#") {
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- l.pos += 2 // pass '{#'
- l.col += 2
-
- for {
- switch l.peek() {
- case EOF:
- l.errorf("Single-line comment not closed.")
- return
- case '\n':
- l.errorf("Newline not permitted in a single-line comment.")
- return
- }
-
- if strings.HasPrefix(l.input[l.pos:], "#}") {
- l.pos += 2 // pass '#}'
- l.col += 2
- break
- }
-
- l.next()
- }
- l.ignore() // ignore whole comment
-
- // Comment skipped
- continue // next token
- }
-
- if strings.HasPrefix(l.input[l.pos:], "{{") || // variable
- strings.HasPrefix(l.input[l.pos:], "{%") { // tag
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
- l.tokenize()
- if l.errored {
- return
- }
- continue
- }
- }
-
- switch l.peek() {
- case '\n':
- l.line++
- l.col = 0
- }
- if l.next() == EOF {
- break
- }
- }
-
- if l.pos > l.start {
- l.emit(TokenHTML)
- }
-
- if l.inVerbatim {
- l.errorf("verbatim-tag not closed, got EOF.")
- }
-}
-
-func (l *lexer) tokenize() {
- for state := l.stateCode; state != nil; {
- state = state()
- }
-}
-
-func (l *lexer) stateCode() lexerStateFn {
-outer_loop:
- for {
- switch {
- case l.accept(tokenSpaceChars):
- if l.value() == "\n" {
- return l.errorf("Newline not allowed within tag/variable.")
- }
- l.ignore()
- continue
- case l.accept(tokenIdentifierChars):
- return l.stateIdentifier
- case l.accept(tokenDigits):
- return l.stateNumber
- case l.accept(`"'`):
- return l.stateString
- }
-
- // Check for symbol
- for _, sym := range TokenSymbols {
- if strings.HasPrefix(l.input[l.start:], sym) {
- l.pos += len(sym)
- l.col += l.length()
- l.emit(TokenSymbol)
-
- if sym == "%}" || sym == "-%}" || sym == "}}" || sym == "-}}" {
- // Tag/variable end, return after emit
- return nil
- }
-
- continue outer_loop
- }
- }
-
- break
- }
-
- // Normal shut down
- return nil
-}
-
-func (l *lexer) stateIdentifier() lexerStateFn {
- l.acceptRun(tokenIdentifierChars)
- l.acceptRun(tokenIdentifierCharsWithDigits)
- for _, kw := range TokenKeywords {
- if kw == l.value() {
- l.emit(TokenKeyword)
- return l.stateCode
- }
- if kw == "nil" {
- l.emit(TokenNil)
- return l.stateCode
- }
- }
- l.emit(TokenIdentifier)
- return l.stateCode
-}
-
-func (l *lexer) stateNumber() lexerStateFn {
- l.acceptRun(tokenDigits)
- if l.accept(tokenIdentifierCharsWithDigits) {
- // This seems to be an identifier starting with a number.
- // See https://github.com/flosch/pongo2/issues/151
- return l.stateIdentifier()
- }
- /*
- Maybe context-sensitive number lexing?
- * comments.0.Text // first comment
- * usercomments.1.0 // second user, first comment
- * if (score >= 8.5) // 8.5 as a number
-
- if l.peek() == '.' {
- l.accept(".")
- if !l.accept(tokenDigits) {
- return l.errorf("Malformed number.")
- }
- l.acceptRun(tokenDigits)
- }
- */
- l.emit(TokenNumber)
- return l.stateCode
-}
-
-func (l *lexer) stateString() lexerStateFn {
- quotationMark := l.value()
- l.ignore()
- l.startcol-- // we're starting the position at the first "
- for !l.accept(quotationMark) {
- switch l.next() {
- case '\\':
- // escape sequence
- switch l.peek() {
- case '"', '\\':
- l.next()
- default:
- return l.errorf("Unknown escape sequence: \\%c", l.peek())
- }
- case EOF:
- return l.errorf("Unexpected EOF, string not closed.")
- case '\n':
- return l.errorf("Newline in string is not allowed.")
- }
- }
- l.backup()
- l.emit(TokenString)
-
- l.next()
- l.ignore()
-
- return l.stateCode
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/nodes.go b/vendor/github.com/flosch/pongo2/v6/nodes.go
deleted file mode 100644
index 5b039cdf4..000000000
--- a/vendor/github.com/flosch/pongo2/v6/nodes.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-// The root document
-type nodeDocument struct {
- Nodes []INode
-}
-
-func (doc *nodeDocument) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range doc.Nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/nodes_html.go b/vendor/github.com/flosch/pongo2/v6/nodes_html.go
deleted file mode 100644
index b980a3a5c..000000000
--- a/vendor/github.com/flosch/pongo2/v6/nodes_html.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package pongo2
-
-import (
- "strings"
-)
-
-type nodeHTML struct {
- token *Token
- trimLeft bool
- trimRight bool
-}
-
-func (n *nodeHTML) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- res := n.token.Val
- if n.trimLeft {
- res = strings.TrimLeft(res, tokenSpaceChars)
- }
- if n.trimRight {
- res = strings.TrimRight(res, tokenSpaceChars)
- }
- writer.WriteString(res)
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/nodes_wrapper.go b/vendor/github.com/flosch/pongo2/v6/nodes_wrapper.go
deleted file mode 100644
index d1bcb8d85..000000000
--- a/vendor/github.com/flosch/pongo2/v6/nodes_wrapper.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package pongo2
-
-type NodeWrapper struct {
- Endtag string
- nodes []INode
-}
-
-func (wrapper *NodeWrapper) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, n := range wrapper.nodes {
- err := n.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/options.go b/vendor/github.com/flosch/pongo2/v6/options.go
deleted file mode 100644
index 9c39e467e..000000000
--- a/vendor/github.com/flosch/pongo2/v6/options.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package pongo2
-
-// Options allow you to change the behavior of template-engine.
-// You can change the options before calling the Execute method.
-type Options struct {
- // If this is set to true the first newline after a block is removed (block, not variable tag!). Defaults to false.
- TrimBlocks bool
-
- // If this is set to true leading spaces and tabs are stripped from the start of a line to a block. Defaults to false
- LStripBlocks bool
-}
-
-func newOptions() *Options {
- return &Options{
- TrimBlocks: false,
- LStripBlocks: false,
- }
-}
-
-// Update updates this options from another options.
-func (opt *Options) Update(other *Options) *Options {
- opt.TrimBlocks = other.TrimBlocks
- opt.LStripBlocks = other.LStripBlocks
-
- return opt
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/parser.go b/vendor/github.com/flosch/pongo2/v6/parser.go
deleted file mode 100644
index 9153090ea..000000000
--- a/vendor/github.com/flosch/pongo2/v6/parser.go
+++ /dev/null
@@ -1,308 +0,0 @@
-package pongo2
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-type INode interface {
- Execute(*ExecutionContext, TemplateWriter) *Error
-}
-
-type IEvaluator interface {
- INode
- GetPositionToken() *Token
- Evaluate(*ExecutionContext) (*Value, *Error)
- FilterApplied(name string) bool
-}
-
-// The parser provides you a comprehensive and easy tool to
-// work with the template document and arguments provided by
-// the user for your custom tag.
-//
-// The parser works on a token list which will be provided by pongo2.
-// A token is a unit you can work with. Tokens are either of type identifier,
-// string, number, keyword, HTML or symbol.
-//
-// (See Token's documentation for more about tokens)
-type Parser struct {
- name string
- idx int
- tokens []*Token
- lastToken *Token
-
- // if the parser parses a template document, here will be
- // a reference to it (needed to access the template through Tags)
- template *Template
-}
-
-// Creates a new parser to parse tokens.
-// Used inside pongo2 to parse documents and to provide an easy-to-use
-// parser for tag authors
-func newParser(name string, tokens []*Token, template *Template) *Parser {
- p := &Parser{
- name: name,
- tokens: tokens,
- template: template,
- }
- if len(tokens) > 0 {
- p.lastToken = tokens[len(tokens)-1]
- }
- return p
-}
-
-// Consume one token. It will be gone forever.
-func (p *Parser) Consume() {
- p.ConsumeN(1)
-}
-
-// Consume N tokens. They will be gone forever.
-func (p *Parser) ConsumeN(count int) {
- p.idx += count
-}
-
-// Returns the current token.
-func (p *Parser) Current() *Token {
- return p.Get(p.idx)
-}
-
-// Returns the CURRENT token if the given type matches.
-// Consumes this token on success.
-func (p *Parser) MatchType(typ TokenType) *Token {
- if t := p.PeekType(typ); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// Consumes this token on success.
-func (p *Parser) Match(typ TokenType, val string) *Token {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// Consumes this token on success.
-func (p *Parser) MatchOne(typ TokenType, vals ...string) *Token {
- for _, val := range vals {
- if t := p.Peek(typ, val); t != nil {
- p.Consume()
- return t
- }
- }
- return nil
-}
-
-// Returns the CURRENT token if the given type matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekType(typ TokenType) *Token {
- return p.PeekTypeN(0, typ)
-}
-
-// Returns the CURRENT token if the given type AND value matches.
-// It DOES NOT consume the token.
-func (p *Parser) Peek(typ TokenType, val string) *Token {
- return p.PeekN(0, typ, val)
-}
-
-// Returns the CURRENT token if the given type AND *one* of
-// the given values matches.
-// It DOES NOT consume the token.
-func (p *Parser) PeekOne(typ TokenType, vals ...string) *Token {
- for _, v := range vals {
- t := p.PeekN(0, typ, v)
- if t != nil {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the
-// given type AND value matches for that token.
-// DOES NOT consume the token.
-func (p *Parser) PeekN(shift int, typ TokenType, val string) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ && t.Val == val {
- return t
- }
- }
- return nil
-}
-
-// Returns the tokens[current position + shift] token if the given type matches.
-// DOES NOT consume the token for that token.
-func (p *Parser) PeekTypeN(shift int, typ TokenType) *Token {
- t := p.Get(p.idx + shift)
- if t != nil {
- if t.Typ == typ {
- return t
- }
- }
- return nil
-}
-
-// Returns the UNCONSUMED token count.
-func (p *Parser) Remaining() int {
- return len(p.tokens) - p.idx
-}
-
-// Returns the total token count.
-func (p *Parser) Count() int {
- return len(p.tokens)
-}
-
-// Returns tokens[i] or NIL (if i >= len(tokens))
-func (p *Parser) Get(i int) *Token {
- if i < len(p.tokens) && i >= 0 {
- return p.tokens[i]
- }
- return nil
-}
-
-// Returns tokens[current-position + shift] or NIL
-// (if (current-position + i) >= len(tokens))
-func (p *Parser) GetR(shift int) *Token {
- i := p.idx + shift
- return p.Get(i)
-}
-
-// Error produces a nice error message and returns an error-object.
-// The 'token'-argument is optional. If provided, it will take
-// the token's position information. If not provided, it will
-// automatically use the CURRENT token's position information.
-func (p *Parser) Error(msg string, token *Token) *Error {
- if token == nil {
- // Set current token
- token = p.Current()
- if token == nil {
- // Set to last token
- if len(p.tokens) > 0 {
- token = p.tokens[len(p.tokens)-1]
- }
- }
- }
- var line, col int
- if token != nil {
- line = token.Line
- col = token.Col
- }
- return &Error{
- Template: p.template,
- Filename: p.name,
- Sender: "parser",
- Line: line,
- Column: col,
- Token: token,
- OrigError: errors.New(msg),
- }
-}
-
-// Wraps all nodes between starting tag and "{% endtag %}" and provides
-// one simple interface to execute the wrapped nodes.
-// It returns a parser to process provided arguments to the tag.
-func (p *Parser) WrapUntilTag(names ...string) (*NodeWrapper, *Parser, *Error) {
- wrapper := &NodeWrapper{}
-
- var tagArgs []*Token
-
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Okay, end the wrapping here
- wrapper.Endtag = tagIdent.Val
- return wrapper, newParser(p.template.name, tagArgs, p.template), nil
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return nil, nil, p.Error("Unexpected EOF.", p.lastToken)
- }
- tagArgs = append(tagArgs, t)
- }
- }
- }
-
- }
-
- // Otherwise process next element to be wrapped
- node, err := p.parseDocElement()
- if err != nil {
- return nil, nil, err
- }
- wrapper.nodes = append(wrapper.nodes, node)
- }
-
- return nil, nil, p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")),
- p.lastToken)
-}
-
-// Skips all nodes between starting tag and "{% endtag %}"
-func (p *Parser) SkipUntilTag(names ...string) *Error {
- for p.Remaining() > 0 {
- // New tag, check whether we have to stop wrapping here
- if p.Peek(TokenSymbol, "{%") != nil {
- tagIdent := p.PeekTypeN(1, TokenIdentifier)
-
- if tagIdent != nil {
- // We've found a (!) end-tag
-
- found := false
- for _, n := range names {
- if tagIdent.Val == n {
- found = true
- break
- }
- }
-
- // We only process the tag if we've found an end tag
- if found {
- // Okay, endtag found.
- p.ConsumeN(2) // '{%' tagname
-
- for {
- if p.Match(TokenSymbol, "%}") != nil {
- // Done skipping, exit.
- return nil
- }
- }
- }
- }
- }
- t := p.Current()
- p.Consume()
- if t == nil {
- return p.Error("Unexpected EOF.", p.lastToken)
- }
- }
-
- return p.Error(fmt.Sprintf("Unexpected EOF, expected tag %s.", strings.Join(names, " or ")), p.lastToken)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/parser_document.go b/vendor/github.com/flosch/pongo2/v6/parser_document.go
deleted file mode 100644
index e3ac2c8e9..000000000
--- a/vendor/github.com/flosch/pongo2/v6/parser_document.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package pongo2
-
-// Doc = { ( Filter | Tag | HTML ) }
-func (p *Parser) parseDocElement() (INode, *Error) {
- t := p.Current()
-
- switch t.Typ {
- case TokenHTML:
- n := &nodeHTML{token: t}
- left := p.PeekTypeN(-1, TokenSymbol)
- right := p.PeekTypeN(1, TokenSymbol)
- n.trimLeft = left != nil && left.TrimWhitespaces
- n.trimRight = right != nil && right.TrimWhitespaces
- p.Consume() // consume HTML element
- return n, nil
- case TokenSymbol:
- switch t.Val {
- case "{{":
- // parse variable
- variable, err := p.parseVariableElement()
- if err != nil {
- return nil, err
- }
- return variable, nil
- case "{%":
- // parse tag
- tag, err := p.parseTagElement()
- if err != nil {
- return nil, err
- }
- return tag, nil
- }
- }
- return nil, p.Error("Unexpected token (only HTML/tags/filters in templates allowed)", t)
-}
-
-func (tpl *Template) parse() *Error {
- tpl.parser = newParser(tpl.name, tpl.tokens, tpl)
- doc, err := tpl.parser.parseDocument()
- if err != nil {
- return err
- }
- tpl.root = doc
- return nil
-}
-
-func (p *Parser) parseDocument() (*nodeDocument, *Error) {
- doc := &nodeDocument{}
-
- for p.Remaining() > 0 {
- node, err := p.parseDocElement()
- if err != nil {
- return nil, err
- }
- doc.Nodes = append(doc.Nodes, node)
- }
-
- return doc, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/parser_expression.go b/vendor/github.com/flosch/pongo2/v6/parser_expression.go
deleted file mode 100644
index 5daa5fabd..000000000
--- a/vendor/github.com/flosch/pongo2/v6/parser_expression.go
+++ /dev/null
@@ -1,521 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type Expression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type relationalExpression struct {
- // TODO: Add location token?
- expr1 IEvaluator
- expr2 IEvaluator
- opToken *Token
-}
-
-type simpleExpression struct {
- negate bool
- negativeSign bool
- term1 IEvaluator
- term2 IEvaluator
- opToken *Token
-}
-
-type term struct {
- // TODO: Add location token?
- factor1 IEvaluator
- factor2 IEvaluator
- opToken *Token
-}
-
-type power struct {
- // TODO: Add location token?
- power1 IEvaluator
- power2 IEvaluator
-}
-
-func (expr *Expression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *relationalExpression) FilterApplied(name string) bool {
- return expr.expr1.FilterApplied(name) && (expr.expr2 == nil ||
- (expr.expr2 != nil && expr.expr2.FilterApplied(name)))
-}
-
-func (expr *simpleExpression) FilterApplied(name string) bool {
- return expr.term1.FilterApplied(name) && (expr.term2 == nil ||
- (expr.term2 != nil && expr.term2.FilterApplied(name)))
-}
-
-func (expr *term) FilterApplied(name string) bool {
- return expr.factor1.FilterApplied(name) && (expr.factor2 == nil ||
- (expr.factor2 != nil && expr.factor2.FilterApplied(name)))
-}
-
-func (expr *power) FilterApplied(name string) bool {
- return expr.power1.FilterApplied(name) && (expr.power2 == nil ||
- (expr.power2 != nil && expr.power2.FilterApplied(name)))
-}
-
-func (expr *Expression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *relationalExpression) GetPositionToken() *Token {
- return expr.expr1.GetPositionToken()
-}
-
-func (expr *simpleExpression) GetPositionToken() *Token {
- return expr.term1.GetPositionToken()
-}
-
-func (expr *term) GetPositionToken() *Token {
- return expr.factor1.GetPositionToken()
-}
-
-func (expr *power) GetPositionToken() *Token {
- return expr.power1.GetPositionToken()
-}
-
-func (expr *Expression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *relationalExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *simpleExpression) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *term) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *power) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (expr *Expression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- switch expr.opToken.Val {
- case "and", "&&":
- if !v1.IsTrue() {
- return AsValue(false), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- case "or", "||":
- if v1.IsTrue() {
- return AsValue(true), nil
- } else {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(v2.IsTrue()), nil
- }
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *relationalExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- v1, err := expr.expr1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.expr2 != nil {
- v2, err := expr.expr2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "<=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() <= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.Before(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() <= v2.Integer()), nil
- case ">=":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() >= v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- tm1, tm2 := v1.Time(), v2.Time()
- return AsValue(tm1.After(tm2) || tm1.Equal(tm2)), nil
- }
- return AsValue(v1.Integer() >= v2.Integer()), nil
- case "==":
- return AsValue(v1.EqualValueTo(v2)), nil
- case ">":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() > v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().After(v2.Time())), nil
- }
- return AsValue(v1.Integer() > v2.Integer()), nil
- case "<":
- if v1.IsFloat() || v2.IsFloat() {
- return AsValue(v1.Float() < v2.Float()), nil
- }
- if v1.IsTime() && v2.IsTime() {
- return AsValue(v1.Time().Before(v2.Time())), nil
- }
- return AsValue(v1.Integer() < v2.Integer()), nil
- case "!=", "<>":
- return AsValue(!v1.EqualValueTo(v2)), nil
- case "in":
- return AsValue(v2.Contains(v1)), nil
- default:
- return nil, ctx.Error(fmt.Sprintf("unimplemented: %s", expr.opToken.Val), expr.opToken)
- }
- } else {
- return v1, nil
- }
-}
-
-func (expr *simpleExpression) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- t1, err := expr.term1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- result := t1
-
- if expr.negate {
- result = result.Negate()
- }
-
- if expr.negativeSign {
- if result.IsNumber() {
- switch {
- case result.IsFloat():
- result = AsValue(-1 * result.Float())
- case result.IsInteger():
- result = AsValue(-1 * result.Integer())
- default:
- return nil, ctx.Error("Operation between a number and a non-(float/integer) is not possible", nil)
- }
- } else {
- return nil, ctx.Error("Negative sign on a non-number expression", expr.GetPositionToken())
- }
- }
-
- if expr.term2 != nil {
- t2, err := expr.term2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "+":
- if result.IsString() || t2.IsString() {
- // Result will be a string
- return AsValue(result.String() + t2.String()), nil
- }
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() + t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() + t2.Integer()), nil
- case "-":
- if result.IsFloat() || t2.IsFloat() {
- // Result will be a float
- return AsValue(result.Float() - t2.Float()), nil
- }
- // Result will be an integer
- return AsValue(result.Integer() - t2.Integer()), nil
- default:
- return nil, ctx.Error("Unimplemented", expr.GetPositionToken())
- }
- }
-
- return result, nil
-}
-
-func (expr *term) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- f1, err := expr.factor1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.factor2 != nil {
- f2, err := expr.factor2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- switch expr.opToken.Val {
- case "*":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() * f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() * f2.Integer()), nil
- case "/":
- if f1.IsFloat() || f2.IsFloat() {
- // Result will be float
- return AsValue(f1.Float() / f2.Float()), nil
- }
- // Result will be int
- return AsValue(f1.Integer() / f2.Integer()), nil
- case "%":
- // Result will be int
- return AsValue(f1.Integer() % f2.Integer()), nil
- default:
- return nil, ctx.Error("unimplemented", expr.opToken)
- }
- } else {
- return f1, nil
- }
-}
-
-func (expr *power) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- p1, err := expr.power1.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if expr.power2 != nil {
- p2, err := expr.power2.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- return AsValue(math.Pow(p1.Float(), p2.Float())), nil
- }
- return p1, nil
-}
-
-func (p *Parser) parseFactor() (IEvaluator, *Error) {
- if p.Match(TokenSymbol, "(") != nil {
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- if p.Match(TokenSymbol, ")") == nil {
- return nil, p.Error("Closing bracket expected after expression", nil)
- }
- return expr, nil
- }
-
- return p.parseVariableOrLiteralWithFilter()
-}
-
-func (p *Parser) parsePower() (IEvaluator, *Error) {
- pw := new(power)
-
- power1, err := p.parseFactor()
- if err != nil {
- return nil, err
- }
- pw.power1 = power1
-
- if p.Match(TokenSymbol, "^") != nil {
- power2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- pw.power2 = power2
- }
-
- if pw.power2 == nil {
- // Shortcut for faster evaluation
- return pw.power1, nil
- }
-
- return pw, nil
-}
-
-func (p *Parser) parseTerm() (IEvaluator, *Error) {
- returnTerm := new(term)
-
- factor1, err := p.parsePower()
- if err != nil {
- return nil, err
- }
- returnTerm.factor1 = factor1
-
- for p.PeekOne(TokenSymbol, "*", "/", "%") != nil {
- if returnTerm.opToken != nil {
- // Create new sub-term
- returnTerm = &term{
- factor1: returnTerm,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- factor2, err := p.parsePower()
- if err != nil {
- return nil, err
- }
-
- returnTerm.opToken = op
- returnTerm.factor2 = factor2
- }
-
- if returnTerm.opToken == nil {
- // Shortcut for faster evaluation
- return returnTerm.factor1, nil
- }
-
- return returnTerm, nil
-}
-
-func (p *Parser) parseSimpleExpression() (IEvaluator, *Error) {
- expr := new(simpleExpression)
-
- if sign := p.MatchOne(TokenSymbol, "+", "-"); sign != nil {
- if sign.Val == "-" {
- expr.negativeSign = true
- }
- }
-
- if p.Match(TokenSymbol, "!") != nil || p.Match(TokenKeyword, "not") != nil {
- expr.negate = true
- }
-
- term1, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
- expr.term1 = term1
-
- for p.PeekOne(TokenSymbol, "+", "-") != nil {
- if expr.opToken != nil {
- // New sub expr
- expr = &simpleExpression{
- term1: expr,
- }
- }
-
- op := p.Current()
- p.Consume()
-
- term2, err := p.parseTerm()
- if err != nil {
- return nil, err
- }
-
- expr.term2 = term2
- expr.opToken = op
- }
-
- if !expr.negate && !expr.negativeSign && expr.term2 == nil {
- // Shortcut for faster evaluation
- return expr.term1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) parseRelationalExpression() (IEvaluator, *Error) {
- expr1, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
-
- expr := &relationalExpression{
- expr1: expr1,
- }
-
- if t := p.MatchOne(TokenSymbol, "==", "<=", ">=", "!=", "<>", ">", "<"); t != nil {
- expr2, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- } else if t := p.MatchOne(TokenKeyword, "in"); t != nil {
- expr2, err := p.parseSimpleExpression()
- if err != nil {
- return nil, err
- }
- expr.opToken = t
- expr.expr2 = expr2
- }
-
- if expr.expr2 == nil {
- // Shortcut for faster evaluation
- return expr.expr1, nil
- }
-
- return expr, nil
-}
-
-func (p *Parser) ParseExpression() (IEvaluator, *Error) {
- rexpr1, err := p.parseRelationalExpression()
- if err != nil {
- return nil, err
- }
-
- exp := &Expression{
- expr1: rexpr1,
- }
-
- if p.PeekOne(TokenSymbol, "&&", "||") != nil || p.PeekOne(TokenKeyword, "and", "or") != nil {
- op := p.Current()
- p.Consume()
- expr2, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- exp.expr2 = expr2
- exp.opToken = op
- }
-
- if exp.expr2 == nil {
- // Shortcut for faster evaluation
- return exp.expr1, nil
- }
-
- return exp, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/pongo2.go b/vendor/github.com/flosch/pongo2/v6/pongo2.go
deleted file mode 100644
index 0cceef745..000000000
--- a/vendor/github.com/flosch/pongo2/v6/pongo2.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package pongo2
-
-// Version string
-const Version = "6.0.0"
-
-// Must panics, if a Template couldn't successfully parsed. This is how you
-// would use it:
-// var baseTemplate = pongo2.Must(pongo2.FromFile("templates/base.html"))
-func Must(tpl *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return tpl
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags.go b/vendor/github.com/flosch/pongo2/v6/tags.go
deleted file mode 100644
index 5bc9f1c21..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package pongo2
-
-/* Incomplete:
- -----------
-
- verbatim (only the "name" argument is missing for verbatim)
-
- Reconsideration:
- ----------------
-
- debug (reason: not sure what to output yet)
- regroup / Grouping on other properties (reason: maybe too python-specific; not sure how useful this would be in Go)
-
- Following built-in tags wont be added:
- --------------------------------------
-
- csrf_token (reason: web-framework specific)
- load (reason: python-specific)
- url (reason: web-framework specific)
-*/
-
-import (
- "fmt"
-)
-
-type INodeTag interface {
- INode
-}
-
-// This is the function signature of the tag's parser you will have
-// to implement in order to create a new tag.
-//
-// 'doc' is providing access to the whole document while 'arguments'
-// is providing access to the user's arguments to the tag:
-//
-// {% your_tag_name some "arguments" 123 %}
-//
-// start_token will be the *Token with the tag's name in it (here: your_tag_name).
-//
-// Please see the Parser documentation on how to use the parser.
-// See RegisterTag()'s documentation for more information about
-// writing a tag as well.
-type TagParser func(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error)
-
-type tag struct {
- name string
- parser TagParser
-}
-
-var tags map[string]*tag
-
-func init() {
- tags = make(map[string]*tag)
-}
-
-// Registers a new tag. You usually want to call this
-// function in the tag's init() function:
-// http://golang.org/doc/effective_go.html#init
-func RegisterTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if existing {
- return fmt.Errorf("tag with name '%s' is already registered", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Replaces an already registered tag with a new implementation. Use this
-// function with caution since it allows you to change existing tag behaviour.
-func ReplaceTag(name string, parserFn TagParser) error {
- _, existing := tags[name]
- if !existing {
- return fmt.Errorf("tag with name '%s' does not exist (therefore cannot be overridden)", name)
- }
- tags[name] = &tag{
- name: name,
- parser: parserFn,
- }
- return nil
-}
-
-// Tag = "{%" IDENT ARGS "%}"
-func (p *Parser) parseTagElement() (INodeTag, *Error) {
- p.Consume() // consume "{%"
- tokenName := p.MatchType(TokenIdentifier)
-
- // Check for identifier
- if tokenName == nil {
- return nil, p.Error("Tag name must be an identifier.", nil)
- }
-
- // Check for the existing tag
- tag, exists := tags[tokenName.Val]
- if !exists {
- // Does not exists
- return nil, p.Error(fmt.Sprintf("Tag '%s' not found (or beginning tag not provided)", tokenName.Val), tokenName)
- }
-
- // Check sandbox tag restriction
- if _, isBanned := p.template.set.bannedTags[tokenName.Val]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of tag '%s' is not allowed (sandbox restriction active).", tokenName.Val), tokenName)
- }
-
- var argsToken []*Token
- for p.Peek(TokenSymbol, "%}") == nil && p.Remaining() > 0 {
- // Add token to args
- argsToken = append(argsToken, p.Current())
- p.Consume() // next token
- }
-
- // EOF?
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpectedly reached EOF, no tag end found.", p.lastToken)
- }
-
- p.Match(TokenSymbol, "%}")
-
- argParser := newParser(p.name, argsToken, p.template)
- if len(argsToken) == 0 {
- // This is done to have nice EOF error messages
- argParser.lastToken = tokenName
- }
-
- p.template.level++
- defer func() { p.template.level-- }()
- return tag.parser(p, tokenName, argParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_autoescape.go b/vendor/github.com/flosch/pongo2/v6/tags_autoescape.go
deleted file mode 100644
index 590a1db35..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_autoescape.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagAutoescapeNode struct {
- wrapper *NodeWrapper
- autoescape bool
-}
-
-func (node *tagAutoescapeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- old := ctx.Autoescape
- ctx.Autoescape = node.autoescape
-
- err := node.wrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- ctx.Autoescape = old
-
- return nil
-}
-
-func tagAutoescapeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- autoescapeNode := &tagAutoescapeNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endautoescape")
- if err != nil {
- return nil, err
- }
- autoescapeNode.wrapper = wrapper
-
- modeToken := arguments.MatchType(TokenIdentifier)
- if modeToken == nil {
- return nil, arguments.Error("A mode is required for autoescape-tag.", nil)
- }
- if modeToken.Val == "on" {
- autoescapeNode.autoescape = true
- } else if modeToken.Val == "off" {
- autoescapeNode.autoescape = false
- } else {
- return nil, arguments.Error("Only 'on' or 'off' is valid as an autoescape-mode.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed autoescape-tag arguments.", nil)
- }
-
- return autoescapeNode, nil
-}
-
-func init() {
- RegisterTag("autoescape", tagAutoescapeParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_block.go b/vendor/github.com/flosch/pongo2/v6/tags_block.go
deleted file mode 100644
index 35fb145ca..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_block.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagBlockNode struct {
- name string
-}
-
-func (node *tagBlockNode) getBlockWrappers(tpl *Template) []*NodeWrapper {
- nodeWrappers := make([]*NodeWrapper, 0)
- var t *NodeWrapper
-
- for tpl != nil {
- t = tpl.blocks[node.name]
- if t != nil {
- nodeWrappers = append(nodeWrappers, t)
- }
- tpl = tpl.child
- }
-
- return nodeWrappers
-}
-
-func (node *tagBlockNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- tpl := ctx.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
-
- // Determine the block to execute
- blockWrappers := node.getBlockWrappers(tpl)
- lenBlockWrappers := len(blockWrappers)
-
- if lenBlockWrappers == 0 {
- return ctx.Error("internal error: len(block_wrappers) == 0 in tagBlockNode.Execute()", nil)
- }
-
- blockWrapper := blockWrappers[lenBlockWrappers-1]
- ctx.Private["block"] = tagBlockInformation{
- ctx: ctx,
- wrappers: blockWrappers[0 : lenBlockWrappers-1],
- }
- err := blockWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type tagBlockInformation struct {
- ctx *ExecutionContext
- wrappers []*NodeWrapper
-}
-
-func (t tagBlockInformation) Super() (*Value, error) {
- lenWrappers := len(t.wrappers)
-
- if lenWrappers == 0 {
- return AsSafeValue(""), nil
- }
-
- superCtx := NewChildExecutionContext(t.ctx)
- superCtx.Private["block"] = tagBlockInformation{
- ctx: t.ctx,
- wrappers: t.wrappers[0 : lenWrappers-1],
- }
-
- blockWrapper := t.wrappers[lenWrappers-1]
- buf := bytes.NewBufferString("")
- err := blockWrapper.Execute(superCtx, &templateWriter{buf})
- if err != nil {
- return AsSafeValue(""), err
- }
- return AsSafeValue(buf.String()), nil
-}
-
-func tagBlockParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'block' requires an identifier.", nil)
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("First argument for tag 'block' must be an identifier.", nil)
- }
-
- if arguments.Remaining() != 0 {
- return nil, arguments.Error("Tag 'block' takes exactly 1 argument (an identifier).", nil)
- }
-
- wrapper, endtagargs, err := doc.WrapUntilTag("endblock")
- if err != nil {
- return nil, err
- }
- if endtagargs.Remaining() > 0 {
- endtagnameToken := endtagargs.MatchType(TokenIdentifier)
- if endtagnameToken != nil {
- if endtagnameToken.Val != nameToken.Val {
- return nil, endtagargs.Error(fmt.Sprintf("Name for 'endblock' must equal to 'block'-tag's name ('%s' != '%s').",
- nameToken.Val, endtagnameToken.Val), nil)
- }
- }
-
- if endtagnameToken == nil || endtagargs.Remaining() > 0 {
- return nil, endtagargs.Error("Either no or only one argument (identifier) allowed for 'endblock'.", nil)
- }
- }
-
- tpl := doc.template
- if tpl == nil {
- panic("internal error: tpl == nil")
- }
- _, hasBlock := tpl.blocks[nameToken.Val]
- if !hasBlock {
- tpl.blocks[nameToken.Val] = wrapper
- } else {
- return nil, arguments.Error(fmt.Sprintf("Block named '%s' already defined", nameToken.Val), nil)
- }
-
- return &tagBlockNode{name: nameToken.Val}, nil
-}
-
-func init() {
- RegisterTag("block", tagBlockParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_comment.go b/vendor/github.com/flosch/pongo2/v6/tags_comment.go
deleted file mode 100644
index 56a02ed99..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_comment.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package pongo2
-
-type tagCommentNode struct{}
-
-func (node *tagCommentNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagCommentParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- commentNode := &tagCommentNode{}
-
- // TODO: Process the endtag's arguments (see django 'comment'-tag documentation)
- err := doc.SkipUntilTag("endcomment")
- if err != nil {
- return nil, err
- }
-
- if arguments.Count() != 0 {
- return nil, arguments.Error("Tag 'comment' does not take any argument.", nil)
- }
-
- return commentNode, nil
-}
-
-func init() {
- RegisterTag("comment", tagCommentParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_cycle.go b/vendor/github.com/flosch/pongo2/v6/tags_cycle.go
deleted file mode 100644
index ffbd254ee..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_cycle.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package pongo2
-
-type tagCycleValue struct {
- node *tagCycleNode
- value *Value
-}
-
-type tagCycleNode struct {
- position *Token
- args []IEvaluator
- idx int
- asName string
- silent bool
-}
-
-func (cv *tagCycleValue) String() string {
- return cv.value.String()
-}
-
-func (node *tagCycleNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- item := node.args[node.idx%len(node.args)]
- node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if t, ok := val.Interface().(*tagCycleValue); ok {
- // {% cycle "test1" "test2"
- // {% cycle cycleitem %}
-
- // Update the cycle value with next value
- item := t.node.args[t.node.idx%len(t.node.args)]
- t.node.idx++
-
- val, err := item.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- t.value = val
-
- if !t.node.silent {
- writer.WriteString(val.String())
- }
- } else {
- // Regular call
-
- cycleValue := &tagCycleValue{
- node: node,
- value: val,
- }
-
- if node.asName != "" {
- ctx.Private[node.asName] = cycleValue
- }
- if !node.silent {
- writer.WriteString(val.String())
- }
- }
-
- return nil
-}
-
-// HINT: We're not supporting the old comma-separated list of expressions argument-style
-func tagCycleParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- cycleNode := &tagCycleNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- cycleNode.args = append(cycleNode.args, node)
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // as
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Name (identifier) expected after 'as'.", nil)
- }
- cycleNode.asName = nameToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "silent") != nil {
- cycleNode.silent = true
- }
-
- // Now we're finished
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed cycle-tag.", nil)
- }
-
- return cycleNode, nil
-}
-
-func init() {
- RegisterTag("cycle", tagCycleParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_extends.go b/vendor/github.com/flosch/pongo2/v6/tags_extends.go
deleted file mode 100644
index 5771020a0..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_extends.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package pongo2
-
-type tagExtendsNode struct {
- filename string
-}
-
-func (node *tagExtendsNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagExtendsParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- extendsNode := &tagExtendsNode{}
-
- if doc.template.level > 1 {
- return nil, arguments.Error("The 'extends' tag can only defined on root level.", start)
- }
-
- if doc.template.parent != nil {
- // Already one parent
- return nil, arguments.Error("This template has already one parent.", start)
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // Get parent's filename
- parentFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- parentTemplate, err := doc.template.set.FromFile(parentFilename)
- if err != nil {
- return nil, err.(*Error)
- }
-
- // Keep track of things
- parentTemplate.child = doc.template
- doc.template.parent = parentTemplate
- extendsNode.filename = parentFilename
- } else {
- return nil, arguments.Error("Tag 'extends' requires a template filename as string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Tag 'extends' does only take 1 argument.", nil)
- }
-
- return extendsNode, nil
-}
-
-func init() {
- RegisterTag("extends", tagExtendsParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_filter.go b/vendor/github.com/flosch/pongo2/v6/tags_filter.go
deleted file mode 100644
index b38fd9298..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_filter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type nodeFilterCall struct {
- name string
- paramExpr IEvaluator
-}
-
-type tagFilterNode struct {
- position *Token
- bodyWrapper *NodeWrapper
- filterChain []*nodeFilterCall
-}
-
-func (node *tagFilterNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- temp := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB size
-
- err := node.bodyWrapper.Execute(ctx, temp)
- if err != nil {
- return err
- }
-
- value := AsValue(temp.String())
-
- for _, call := range node.filterChain {
- var param *Value
- if call.paramExpr != nil {
- param, err = call.paramExpr.Evaluate(ctx)
- if err != nil {
- return err
- }
- } else {
- param = AsValue(nil)
- }
- value, err = ApplyFilter(call.name, value, param)
- if err != nil {
- return ctx.Error(err.Error(), node.position)
- }
- }
-
- writer.WriteString(value.String())
-
- return nil
-}
-
-func tagFilterParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- filterNode := &tagFilterNode{
- position: start,
- }
-
- wrapper, _, err := doc.WrapUntilTag("endfilter")
- if err != nil {
- return nil, err
- }
- filterNode.bodyWrapper = wrapper
-
- for arguments.Remaining() > 0 {
- filterCall := &nodeFilterCall{}
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected a filter name (identifier).", nil)
- }
- filterCall.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, ":") != nil {
- // Filter parameter
- // NOTICE: we can't use ParseExpression() here, because it would parse the next filter "|..." as well in the argument list
- expr, err := arguments.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- filterCall.paramExpr = expr
- }
-
- filterNode.filterChain = append(filterNode.filterChain, filterCall)
-
- if arguments.MatchOne(TokenSymbol, "|") == nil {
- break
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed filter-tag arguments.", nil)
- }
-
- return filterNode, nil
-}
-
-func init() {
- RegisterTag("filter", tagFilterParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_firstof.go b/vendor/github.com/flosch/pongo2/v6/tags_firstof.go
deleted file mode 100644
index 5b2888e2b..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_firstof.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package pongo2
-
-type tagFirstofNode struct {
- position *Token
- args []IEvaluator
-}
-
-func (node *tagFirstofNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for _, arg := range node.args {
- val, err := arg.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if val.IsTrue() {
- if ctx.Autoescape && !arg.FilterApplied("safe") {
- val, err = ApplyFilter("escape", val, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(val.String())
- return nil
- }
- }
-
- return nil
-}
-
-func tagFirstofParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- firstofNode := &tagFirstofNode{
- position: start,
- }
-
- for arguments.Remaining() > 0 {
- node, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- firstofNode.args = append(firstofNode.args, node)
- }
-
- return firstofNode, nil
-}
-
-func init() {
- RegisterTag("firstof", tagFirstofParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_for.go b/vendor/github.com/flosch/pongo2/v6/tags_for.go
deleted file mode 100644
index 5b0b5554c..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_for.go
+++ /dev/null
@@ -1,159 +0,0 @@
-package pongo2
-
-type tagForNode struct {
- key string
- value string // only for maps: for key, value in map
- objectEvaluator IEvaluator
- reversed bool
- sorted bool
-
- bodyWrapper *NodeWrapper
- emptyWrapper *NodeWrapper
-}
-
-type tagForLoopInformation struct {
- Counter int
- Counter0 int
- Revcounter int
- Revcounter0 int
- First bool
- Last bool
- Parentloop *tagForLoopInformation
-}
-
-func (node *tagForNode) Execute(ctx *ExecutionContext, writer TemplateWriter) (forError *Error) {
- // Backup forloop (as parentloop in public context), key-name and value-name
- forCtx := NewChildExecutionContext(ctx)
- parentloop := forCtx.Private["forloop"]
-
- // Create loop struct
- loopInfo := &tagForLoopInformation{
- First: true,
- }
-
- // Is it a loop in a loop?
- if parentloop != nil {
- loopInfo.Parentloop = parentloop.(*tagForLoopInformation)
- }
-
- // Register loopInfo in public context
- forCtx.Private["forloop"] = loopInfo
-
- obj, err := node.objectEvaluator.Evaluate(forCtx)
- if err != nil {
- return err
- }
-
- obj.IterateOrder(func(idx, count int, key, value *Value) bool {
- // There's something to iterate over (correct type and at least 1 item)
-
- // Update loop infos and public context
- forCtx.Private[node.key] = key
- if value != nil {
- forCtx.Private[node.value] = value
- }
- loopInfo.Counter = idx + 1
- loopInfo.Counter0 = idx
- if idx == 1 {
- loopInfo.First = false
- }
- if idx+1 == count {
- loopInfo.Last = true
- }
- loopInfo.Revcounter = count - idx // TODO: Not sure about this, have to look it up
- loopInfo.Revcounter0 = count - (idx + 1) // TODO: Not sure about this, have to look it up
-
- // Render elements with updated context
- err := node.bodyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- return false
- }
- return true
- }, func() {
- // Nothing to iterate over (maybe wrong type or no items)
- if node.emptyWrapper != nil {
- err := node.emptyWrapper.Execute(forCtx, writer)
- if err != nil {
- forError = err
- }
- }
- }, node.reversed, node.sorted)
-
- return forError
-}
-
-func tagForParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- forNode := &tagForNode{}
-
- // Arguments parsing
- var valueToken *Token
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an key identifier as first argument for 'for'-tag", nil)
- }
-
- if arguments.Match(TokenSymbol, ",") != nil {
- // Value name is provided
- valueToken = arguments.MatchType(TokenIdentifier)
- if valueToken == nil {
- return nil, arguments.Error("Value name must be an identifier.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "in") == nil {
- return nil, arguments.Error("Expected keyword 'in'.", nil)
- }
-
- objectEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- forNode.objectEvaluator = objectEvaluator
- forNode.key = keyToken.Val
- if valueToken != nil {
- forNode.value = valueToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "reversed") != nil {
- forNode.reversed = true
- }
-
- if arguments.MatchOne(TokenIdentifier, "sorted") != nil {
- forNode.sorted = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed for-loop arguments.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("empty", "endfor")
- if err != nil {
- return nil, err
- }
- forNode.bodyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "empty" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endfor")
- if err != nil {
- return nil, err
- }
- forNode.emptyWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return forNode, nil
-}
-
-func init() {
- RegisterTag("for", tagForParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_if.go b/vendor/github.com/flosch/pongo2/v6/tags_if.go
deleted file mode 100644
index 3eeaf3b49..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_if.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package pongo2
-
-type tagIfNode struct {
- conditions []IEvaluator
- wrappers []*NodeWrapper
-}
-
-func (node *tagIfNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for i, condition := range node.conditions {
- result, err := condition.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if result.IsTrue() {
- return node.wrappers[i].Execute(ctx, writer)
- }
- // Last condition?
- if len(node.conditions) == i+1 && len(node.wrappers) > i+1 {
- return node.wrappers[i+1].Execute(ctx, writer)
- }
- }
- return nil
-}
-
-func tagIfParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifNode := &tagIfNode{}
-
- // Parse first and main IF condition
- condition, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("If-condition is malformed.", nil)
- }
-
- // Check the rest
- for {
- wrapper, tagArgs, err := doc.WrapUntilTag("elif", "else", "endif")
- if err != nil {
- return nil, err
- }
- ifNode.wrappers = append(ifNode.wrappers, wrapper)
-
- if wrapper.Endtag == "elif" {
- // elif can take a condition
- condition, err = tagArgs.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifNode.conditions = append(ifNode.conditions, condition)
-
- if tagArgs.Remaining() > 0 {
- return nil, tagArgs.Error("Elif-condition is malformed.", nil)
- }
- } else {
- if tagArgs.Count() > 0 {
- // else/endif can't take any conditions
- return nil, tagArgs.Error("Arguments not allowed here.", nil)
- }
- }
-
- if wrapper.Endtag == "endif" {
- break
- }
- }
-
- return ifNode, nil
-}
-
-func init() {
- RegisterTag("if", tagIfParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_ifchanged.go b/vendor/github.com/flosch/pongo2/v6/tags_ifchanged.go
deleted file mode 100644
index 45296a0a3..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_ifchanged.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package pongo2
-
-import (
- "bytes"
-)
-
-type tagIfchangedNode struct {
- watchedExpr []IEvaluator
- lastValues []*Value
- lastContent []byte
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfchangedNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if len(node.watchedExpr) == 0 {
- // Check against own rendered body
-
- buf := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
- err := node.thenWrapper.Execute(ctx, buf)
- if err != nil {
- return err
- }
-
- bufBytes := buf.Bytes()
- if !bytes.Equal(node.lastContent, bufBytes) {
- // Rendered content changed, output it
- writer.Write(bufBytes)
- node.lastContent = bufBytes
- }
- } else {
- nowValues := make([]*Value, 0, len(node.watchedExpr))
- for _, expr := range node.watchedExpr {
- val, err := expr.Evaluate(ctx)
- if err != nil {
- return err
- }
- nowValues = append(nowValues, val)
- }
-
- // Compare old to new values now
- changed := len(node.lastValues) == 0
-
- for idx, oldVal := range node.lastValues {
- if !oldVal.EqualValueTo(nowValues[idx]) {
- changed = true
- break // we can stop here because ONE value changed
- }
- }
-
- node.lastValues = nowValues
-
- if changed {
- // Render thenWrapper
- err := node.thenWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- } else {
- // Render elseWrapper
- err := node.elseWrapper.Execute(ctx, writer)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func tagIfchangedParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifchangedNode := &tagIfchangedNode{}
-
- for arguments.Remaining() > 0 {
- // Parse condition
- expr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifchangedNode.watchedExpr = append(ifchangedNode.watchedExpr, expr)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Ifchanged-arguments are malformed.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifchanged")
- if err != nil {
- return nil, err
- }
- ifchangedNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifchangedNode, nil
-}
-
-func init() {
- RegisterTag("ifchanged", tagIfchangedParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_ifequal.go b/vendor/github.com/flosch/pongo2/v6/tags_ifequal.go
deleted file mode 100644
index 103f1c7ba..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_ifequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifequalNode := &tagIfEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifequalNode.var1 = var1
- ifequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifequal")
- if err != nil {
- return nil, err
- }
- ifequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifequalNode, nil
-}
-
-func init() {
- RegisterTag("ifequal", tagIfEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_ifnotequal.go b/vendor/github.com/flosch/pongo2/v6/tags_ifnotequal.go
deleted file mode 100644
index 0d287d349..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_ifnotequal.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package pongo2
-
-type tagIfNotEqualNode struct {
- var1, var2 IEvaluator
- thenWrapper *NodeWrapper
- elseWrapper *NodeWrapper
-}
-
-func (node *tagIfNotEqualNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- r1, err := node.var1.Evaluate(ctx)
- if err != nil {
- return err
- }
- r2, err := node.var2.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- result := !r1.EqualValueTo(r2)
-
- if result {
- return node.thenWrapper.Execute(ctx, writer)
- }
- if node.elseWrapper != nil {
- return node.elseWrapper.Execute(ctx, writer)
- }
- return nil
-}
-
-func tagIfNotEqualParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ifnotequalNode := &tagIfNotEqualNode{}
-
- // Parse two expressions
- var1, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- var2, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- ifnotequalNode.var1 = var1
- ifnotequalNode.var2 = var2
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("ifequal only takes 2 arguments.", nil)
- }
-
- // Wrap then/else-blocks
- wrapper, endargs, err := doc.WrapUntilTag("else", "endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.thenWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if wrapper.Endtag == "else" {
- // if there's an else in the if-statement, we need the else-Block as well
- wrapper, endargs, err = doc.WrapUntilTag("endifnotequal")
- if err != nil {
- return nil, err
- }
- ifnotequalNode.elseWrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
- }
-
- return ifnotequalNode, nil
-}
-
-func init() {
- RegisterTag("ifnotequal", tagIfNotEqualParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_import.go b/vendor/github.com/flosch/pongo2/v6/tags_import.go
deleted file mode 100644
index a64c82076..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_import.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package pongo2
-
-import (
- "fmt"
-)
-
-type tagImportNode struct {
- position *Token
- filename string
- macros map[string]*tagMacroNode // alias/name -> macro instance
-}
-
-func (node *tagImportNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- for name, macro := range node.macros {
- func(name string, macro *tagMacroNode) {
- ctx.Private[name] = func(args ...*Value) (*Value, error) {
- return macro.call(ctx, args...)
- }
- }(name, macro)
- }
- return nil
-}
-
-func tagImportParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- importNode := &tagImportNode{
- position: start,
- macros: make(map[string]*tagMacroNode),
- }
-
- filenameToken := arguments.MatchType(TokenString)
- if filenameToken == nil {
- return nil, arguments.Error("Import-tag needs a filename as string.", nil)
- }
-
- importNode.filename = doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- if arguments.Remaining() == 0 {
- return nil, arguments.Error("You must at least specify one macro to import.", nil)
- }
-
- // Compile the given template
- tpl, err := doc.template.set.FromFile(importNode.filename)
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, start)
- }
-
- for arguments.Remaining() > 0 {
- macroNameToken := arguments.MatchType(TokenIdentifier)
- if macroNameToken == nil {
- return nil, arguments.Error("Expected macro name (identifier).", nil)
- }
-
- asName := macroNameToken.Val
- if arguments.Match(TokenKeyword, "as") != nil {
- aliasToken := arguments.MatchType(TokenIdentifier)
- if aliasToken == nil {
- return nil, arguments.Error("Expected macro alias name (identifier).", nil)
- }
- asName = aliasToken.Val
- }
-
- macroInstance, has := tpl.exportedMacros[macroNameToken.Val]
- if !has {
- return nil, arguments.Error(fmt.Sprintf("Macro '%s' not found (or not exported) in '%s'.", macroNameToken.Val,
- importNode.filename), macroNameToken)
- }
-
- importNode.macros[asName] = macroInstance
-
- if arguments.Remaining() == 0 {
- break
- }
-
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ','.", nil)
- }
- }
-
- return importNode, nil
-}
-
-func init() {
- RegisterTag("import", tagImportParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_include.go b/vendor/github.com/flosch/pongo2/v6/tags_include.go
deleted file mode 100644
index 6d619fdab..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_include.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package pongo2
-
-type tagIncludeNode struct {
- tpl *Template
- filenameEvaluator IEvaluator
- lazy bool
- only bool
- filename string
- withPairs map[string]IEvaluator
- ifExists bool
-}
-
-func (node *tagIncludeNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Building the context for the template
- includeCtx := make(Context)
-
- // Fill the context with all data from the parent
- if !node.only {
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
- }
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- includeCtx[key] = val
- }
-
- // Execute the template
- if node.lazy {
- // Evaluate the filename
- filename, err := node.filenameEvaluator.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if filename.String() == "" {
- return ctx.Error("Filename for 'include'-tag evaluated to an empty string.", nil)
- }
-
- // Get include-filename
- includedFilename := ctx.template.set.resolveFilename(ctx.template, filename.String())
-
- includedTpl, err2 := ctx.template.set.FromFile(includedFilename)
- if err2 != nil {
- // if this is ReadFile error, and "if_exists" flag is enabled
- if node.ifExists && err2.(*Error).Sender == "fromfile" {
- return nil
- }
- return err2.(*Error)
- }
- err2 = includedTpl.ExecuteWriter(includeCtx, writer)
- if err2 != nil {
- return err2.(*Error)
- }
- return nil
- }
- // Template is already parsed with static filename
- err := node.tpl.ExecuteWriter(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- return nil
-}
-
-type tagIncludeEmptyNode struct{}
-
-func (node *tagIncludeEmptyNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- return nil
-}
-
-func tagIncludeParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- includeNode := &tagIncludeNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if filenameToken := arguments.MatchType(TokenString); filenameToken != nil {
- // prepared, static template
-
- // "if_exists" flag
- ifExists := arguments.Match(TokenIdentifier, "if_exists") != nil
-
- // Get include-filename
- includedFilename := doc.template.set.resolveFilename(doc.template, filenameToken.Val)
-
- // Parse the parent
- includeNode.filename = includedFilename
- includedTpl, err := doc.template.set.FromFile(includedFilename)
- if err != nil {
- // if this is ReadFile error, and "if_exists" token presents we should create and empty node
- if err.(*Error).Sender == "fromfile" && ifExists {
- return &tagIncludeEmptyNode{}, nil
- }
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.tpl = includedTpl
- } else {
- // No String, then the user wants to use lazy-evaluation (slower, but possible)
- filenameEvaluator, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, filenameToken)
- }
- includeNode.filenameEvaluator = filenameEvaluator
- includeNode.lazy = true
- includeNode.ifExists = arguments.Match(TokenIdentifier, "if_exists") != nil // "if_exists" flag
- }
-
- // After having parsed the filename we're gonna parse the with+only options
- if arguments.Match(TokenIdentifier, "with") != nil {
- for arguments.Remaining() > 0 {
- // We have at least one key=expr pair (because of starting "with")
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err.updateFromTokenIfNeeded(doc.template, keyToken)
- }
-
- includeNode.withPairs[keyToken.Val] = valueExpr
-
- // Only?
- if arguments.Match(TokenIdentifier, "only") != nil {
- includeNode.only = true
- break // stop parsing arguments because it's the last option
- }
- }
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'include'-tag arguments.", nil)
- }
-
- return includeNode, nil
-}
-
-func init() {
- RegisterTag("include", tagIncludeParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_lorem.go b/vendor/github.com/flosch/pongo2/v6/tags_lorem.go
deleted file mode 100644
index 7794f6c12..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_lorem.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math/rand"
- "strings"
- "time"
-)
-
-var (
- tagLoremParagraphs = strings.Split(tagLoremText, "\n")
- tagLoremWords = strings.Fields(tagLoremText)
-)
-
-type tagLoremNode struct {
- position *Token
- count int // number of paragraphs
- method string // w = words, p = HTML paragraphs, b = plain-text (default is b)
- random bool // does not use the default paragraph "Lorem ipsum dolor sit amet, ..."
-}
-
-func (node *tagLoremNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- switch node.method {
- case "b":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- }
- }
- case "w":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[rand.Intn(len(tagLoremWords))]
- writer.WriteString(word)
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString(" ")
- }
- word := tagLoremWords[i%len(tagLoremWords)]
- writer.WriteString(word)
- }
- }
- case "p":
- if node.random {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[rand.Intn(len(tagLoremParagraphs))]
- writer.WriteString(par)
- writer.WriteString("
")
- }
- } else {
- for i := 0; i < node.count; i++ {
- if i > 0 {
- writer.WriteString("\n")
- }
- writer.WriteString("")
- par := tagLoremParagraphs[i%len(tagLoremParagraphs)]
- writer.WriteString(par)
- writer.WriteString("
")
-
- }
- }
- default:
- return ctx.OrigError(fmt.Errorf("unsupported method: %s", node.method), nil)
- }
-
- return nil
-}
-
-func tagLoremParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- loremNode := &tagLoremNode{
- position: start,
- count: 1,
- method: "b",
- }
-
- if countToken := arguments.MatchType(TokenNumber); countToken != nil {
- loremNode.count = AsValue(countToken.Val).Integer()
- }
-
- if methodToken := arguments.MatchType(TokenIdentifier); methodToken != nil {
- if methodToken.Val != "w" && methodToken.Val != "p" && methodToken.Val != "b" {
- return nil, arguments.Error("lorem-method must be either 'w', 'p' or 'b'.", nil)
- }
-
- loremNode.method = methodToken.Val
- }
-
- if arguments.MatchOne(TokenIdentifier, "random") != nil {
- loremNode.random = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed lorem-tag arguments.", nil)
- }
-
- return loremNode, nil
-}
-
-func init() {
- rand.Seed(time.Now().Unix())
-
- RegisterTag("lorem", tagLoremParser)
-}
-
-const tagLoremText = `Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat.
-Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat. Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis at vero eros et accumsan et iusto odio dignissim qui blandit praesent luptatum zzril delenit augue duis dolore te feugait nulla facilisi.
-Nam liber tempor cum soluta nobis eleifend option congue nihil imperdiet doming id quod mazim placerat facer possim assum. Lorem ipsum dolor sit amet, consectetuer adipiscing elit, sed diam nonummy nibh euismod tincidunt ut laoreet dolore magna aliquam erat volutpat. Ut wisi enim ad minim veniam, quis nostrud exerci tation ullamcorper suscipit lobortis nisl ut aliquip ex ea commodo consequat.
-Duis autem vel eum iriure dolor in hendrerit in vulputate velit esse molestie consequat, vel illum dolore eu feugiat nulla facilisis.
-At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, At accusam aliquyam diam diam dolore dolores duo eirmod eos erat, et nonumy sed tempor et et invidunt justo labore Stet clita ea et gubergren, kasd magna no rebum. sanctus sea sed takimata ut vero voluptua. est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat.
-Consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. Lorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.`
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_macro.go b/vendor/github.com/flosch/pongo2/v6/tags_macro.go
deleted file mode 100644
index 44e99b771..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_macro.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
-)
-
-type tagMacroNode struct {
- position *Token
- name string
- argsOrder []string
- args map[string]IEvaluator
- exported bool
-
- wrapper *NodeWrapper
-}
-
-func (node *tagMacroNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- ctx.Private[node.name] = func(args ...*Value) (*Value, error) {
- return node.call(ctx, args...)
- }
-
- return nil
-}
-
-func (node *tagMacroNode) call(ctx *ExecutionContext, args ...*Value) (*Value, error) {
- argsCtx := make(Context)
-
- for k, v := range node.args {
- if v == nil {
- // User did not provided a default value
- argsCtx[k] = nil
- } else {
- // Evaluate the default value
- valueExpr, err := v.Evaluate(ctx)
- if err != nil {
- ctx.Logf(err.Error())
- return AsSafeValue(""), err
- }
-
- argsCtx[k] = valueExpr
- }
- }
-
- if len(args) > len(node.argsOrder) {
- // Too many arguments, we're ignoring them and just logging into debug mode.
- err := ctx.Error(fmt.Sprintf("Macro '%s' called with too many arguments (%d instead of %d).",
- node.name, len(args), len(node.argsOrder)), nil).updateFromTokenIfNeeded(ctx.template, node.position)
-
- return AsSafeValue(""), err
- }
-
- // Make a context for the macro execution
- macroCtx := NewChildExecutionContext(ctx)
-
- // Register all arguments in the private context
- macroCtx.Private.Update(argsCtx)
-
- for idx, argValue := range args {
- macroCtx.Private[node.argsOrder[idx]] = argValue.Interface()
- }
-
- var b bytes.Buffer
- err := node.wrapper.Execute(macroCtx, &b)
- if err != nil {
- return AsSafeValue(""), err.updateFromTokenIfNeeded(ctx.template, node.position)
- }
-
- return AsSafeValue(b.String()), nil
-}
-
-func tagMacroParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- macroNode := &tagMacroNode{
- position: start,
- args: make(map[string]IEvaluator),
- }
-
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Macro-tag needs at least an identifier as name.", nil)
- }
- macroNode.name = nameToken.Val
-
- if arguments.MatchOne(TokenSymbol, "(") == nil {
- return nil, arguments.Error("Expected '('.", nil)
- }
-
- for arguments.Match(TokenSymbol, ")") == nil {
- argNameToken := arguments.MatchType(TokenIdentifier)
- if argNameToken == nil {
- return nil, arguments.Error("Expected argument name as identifier.", nil)
- }
- macroNode.argsOrder = append(macroNode.argsOrder, argNameToken.Val)
-
- if arguments.Match(TokenSymbol, "=") != nil {
- // Default expression follows
- argDefaultExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- macroNode.args[argNameToken.Val] = argDefaultExpr
- } else {
- // No default expression
- macroNode.args[argNameToken.Val] = nil
- }
-
- if arguments.Match(TokenSymbol, ")") != nil {
- break
- }
- if arguments.Match(TokenSymbol, ",") == nil {
- return nil, arguments.Error("Expected ',' or ')'.", nil)
- }
- }
-
- if arguments.Match(TokenKeyword, "export") != nil {
- macroNode.exported = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed macro-tag.", nil)
- }
-
- // Body wrapping
- wrapper, endargs, err := doc.WrapUntilTag("endmacro")
- if err != nil {
- return nil, err
- }
- macroNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- if macroNode.exported {
- // Now register the macro if it wants to be exported
- _, has := doc.template.exportedMacros[macroNode.name]
- if has {
- return nil, doc.Error(fmt.Sprintf("another macro with name '%s' already exported", macroNode.name), start)
- }
- doc.template.exportedMacros[macroNode.name] = macroNode
- }
-
- return macroNode, nil
-}
-
-func init() {
- RegisterTag("macro", tagMacroParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_now.go b/vendor/github.com/flosch/pongo2/v6/tags_now.go
deleted file mode 100644
index d9fa4a371..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_now.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-import (
- "time"
-)
-
-type tagNowNode struct {
- position *Token
- format string
- fake bool
-}
-
-func (node *tagNowNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- var t time.Time
- if node.fake {
- t = time.Date(2014, time.February, 05, 18, 31, 45, 00, time.UTC)
- } else {
- t = time.Now()
- }
-
- writer.WriteString(t.Format(node.format))
-
- return nil
-}
-
-func tagNowParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- nowNode := &tagNowNode{
- position: start,
- }
-
- formatToken := arguments.MatchType(TokenString)
- if formatToken == nil {
- return nil, arguments.Error("Expected a format string.", nil)
- }
- nowNode.format = formatToken.Val
-
- if arguments.MatchOne(TokenIdentifier, "fake") != nil {
- nowNode.fake = true
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed now-tag arguments.", nil)
- }
-
- return nowNode, nil
-}
-
-func init() {
- RegisterTag("now", tagNowParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_set.go b/vendor/github.com/flosch/pongo2/v6/tags_set.go
deleted file mode 100644
index be121c12a..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_set.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package pongo2
-
-type tagSetNode struct {
- name string
- expression IEvaluator
-}
-
-func (node *tagSetNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- // Evaluate expression
- value, err := node.expression.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- ctx.Private[node.name] = value
- return nil
-}
-
-func tagSetParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- node := &tagSetNode{}
-
- // Parse variable name
- typeToken := arguments.MatchType(TokenIdentifier)
- if typeToken == nil {
- return nil, arguments.Error("Expected an identifier.", nil)
- }
- node.name = typeToken.Val
-
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
-
- // Variable expression
- keyExpression, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expression = keyExpression
-
- // Remaining arguments
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed 'set'-tag arguments.", nil)
- }
-
- return node, nil
-}
-
-func init() {
- RegisterTag("set", tagSetParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_spaceless.go b/vendor/github.com/flosch/pongo2/v6/tags_spaceless.go
deleted file mode 100644
index 4fa851ba4..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_spaceless.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "regexp"
-)
-
-type tagSpacelessNode struct {
- wrapper *NodeWrapper
-}
-
-var tagSpacelessRegexp = regexp.MustCompile(`(?U:(<.*>))([\t\n\v\f\r ]+)(?U:(<.*>))`)
-
-func (node *tagSpacelessNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- b := bytes.NewBuffer(make([]byte, 0, 1024)) // 1 KiB
-
- err := node.wrapper.Execute(ctx, b)
- if err != nil {
- return err
- }
-
- s := b.String()
- // Repeat this recursively
- changed := true
- for changed {
- s2 := tagSpacelessRegexp.ReplaceAllString(s, "$1$3")
- changed = s != s2
- s = s2
- }
-
- writer.WriteString(s)
-
- return nil
-}
-
-func tagSpacelessParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- spacelessNode := &tagSpacelessNode{}
-
- wrapper, _, err := doc.WrapUntilTag("endspaceless")
- if err != nil {
- return nil, err
- }
- spacelessNode.wrapper = wrapper
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed spaceless-tag arguments.", nil)
- }
-
- return spacelessNode, nil
-}
-
-func init() {
- RegisterTag("spaceless", tagSpacelessParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_ssi.go b/vendor/github.com/flosch/pongo2/v6/tags_ssi.go
deleted file mode 100644
index c33858d5f..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_ssi.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package pongo2
-
-import (
- "io/ioutil"
-)
-
-type tagSSINode struct {
- filename string
- content string
- template *Template
-}
-
-func (node *tagSSINode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- if node.template != nil {
- // Execute the template within the current context
- includeCtx := make(Context)
- includeCtx.Update(ctx.Public)
- includeCtx.Update(ctx.Private)
-
- err := node.template.execute(includeCtx, writer)
- if err != nil {
- return err.(*Error)
- }
- } else {
- // Just print out the content
- writer.WriteString(node.content)
- }
- return nil
-}
-
-func tagSSIParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- SSINode := &tagSSINode{}
-
- if fileToken := arguments.MatchType(TokenString); fileToken != nil {
- SSINode.filename = fileToken.Val
-
- if arguments.Match(TokenIdentifier, "parsed") != nil {
- // parsed
- temporaryTpl, err := doc.template.set.FromFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, err.(*Error).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.template = temporaryTpl
- } else {
- // plaintext
- buf, err := ioutil.ReadFile(doc.template.set.resolveFilename(doc.template, fileToken.Val))
- if err != nil {
- return nil, (&Error{
- Sender: "tag:ssi",
- OrigError: err,
- }).updateFromTokenIfNeeded(doc.template, fileToken)
- }
- SSINode.content = string(buf)
- }
- } else {
- return nil, arguments.Error("First argument must be a string.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed SSI-tag argument.", nil)
- }
-
- return SSINode, nil
-}
-
-func init() {
- RegisterTag("ssi", tagSSIParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_templatetag.go b/vendor/github.com/flosch/pongo2/v6/tags_templatetag.go
deleted file mode 100644
index 164b4dc3d..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_templatetag.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package pongo2
-
-type tagTemplateTagNode struct {
- content string
-}
-
-var templateTagMapping = map[string]string{
- "openblock": "{%",
- "closeblock": "%}",
- "openvariable": "{{",
- "closevariable": "}}",
- "openbrace": "{",
- "closebrace": "}",
- "opencomment": "{#",
- "closecomment": "#}",
-}
-
-func (node *tagTemplateTagNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- writer.WriteString(node.content)
- return nil
-}
-
-func tagTemplateTagParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- ttNode := &tagTemplateTagNode{}
-
- if argToken := arguments.MatchType(TokenIdentifier); argToken != nil {
- output, found := templateTagMapping[argToken.Val]
- if !found {
- return nil, arguments.Error("Argument not found", argToken)
- }
- ttNode.content = output
- } else {
- return nil, arguments.Error("Identifier expected.", nil)
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed templatetag-tag argument.", nil)
- }
-
- return ttNode, nil
-}
-
-func init() {
- RegisterTag("templatetag", tagTemplateTagParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_widthratio.go b/vendor/github.com/flosch/pongo2/v6/tags_widthratio.go
deleted file mode 100644
index 70c9c3e8a..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_widthratio.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "math"
-)
-
-type tagWidthratioNode struct {
- position *Token
- current, max IEvaluator
- width IEvaluator
- ctxName string
-}
-
-func (node *tagWidthratioNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- current, err := node.current.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- max, err := node.max.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- width, err := node.width.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- value := int(math.Ceil(current.Float()/max.Float()*width.Float() + 0.5))
-
- if node.ctxName == "" {
- writer.WriteString(fmt.Sprintf("%d", value))
- } else {
- ctx.Private[node.ctxName] = value
- }
-
- return nil
-}
-
-func tagWidthratioParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- widthratioNode := &tagWidthratioNode{
- position: start,
- }
-
- current, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.current = current
-
- max, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.max = max
-
- width, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- widthratioNode.width = width
-
- if arguments.MatchOne(TokenKeyword, "as") != nil {
- // Name follows
- nameToken := arguments.MatchType(TokenIdentifier)
- if nameToken == nil {
- return nil, arguments.Error("Expected name (identifier).", nil)
- }
- widthratioNode.ctxName = nameToken.Val
- }
-
- if arguments.Remaining() > 0 {
- return nil, arguments.Error("Malformed widthratio-tag arguments.", nil)
- }
-
- return widthratioNode, nil
-}
-
-func init() {
- RegisterTag("widthratio", tagWidthratioParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/tags_with.go b/vendor/github.com/flosch/pongo2/v6/tags_with.go
deleted file mode 100644
index 32b3c1c42..000000000
--- a/vendor/github.com/flosch/pongo2/v6/tags_with.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package pongo2
-
-type tagWithNode struct {
- withPairs map[string]IEvaluator
- wrapper *NodeWrapper
-}
-
-func (node *tagWithNode) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- //new context for block
- withctx := NewChildExecutionContext(ctx)
-
- // Put all custom with-pairs into the context
- for key, value := range node.withPairs {
- val, err := value.Evaluate(ctx)
- if err != nil {
- return err
- }
- withctx.Private[key] = val
- }
-
- return node.wrapper.Execute(withctx, writer)
-}
-
-func tagWithParser(doc *Parser, start *Token, arguments *Parser) (INodeTag, *Error) {
- withNode := &tagWithNode{
- withPairs: make(map[string]IEvaluator),
- }
-
- if arguments.Count() == 0 {
- return nil, arguments.Error("Tag 'with' requires at least one argument.", nil)
- }
-
- wrapper, endargs, err := doc.WrapUntilTag("endwith")
- if err != nil {
- return nil, err
- }
- withNode.wrapper = wrapper
-
- if endargs.Count() > 0 {
- return nil, endargs.Error("Arguments not allowed here.", nil)
- }
-
- // Scan through all arguments to see which style the user uses (old or new style).
- // If we find any "as" keyword we will enforce old style; otherwise we will use new style.
- oldStyle := false // by default we're using the new_style
- for i := 0; i < arguments.Count(); i++ {
- if arguments.PeekN(i, TokenKeyword, "as") != nil {
- oldStyle = true
- break
- }
- }
-
- for arguments.Remaining() > 0 {
- if oldStyle {
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- if arguments.Match(TokenKeyword, "as") == nil {
- return nil, arguments.Error("Expected 'as' keyword.", nil)
- }
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- } else {
- keyToken := arguments.MatchType(TokenIdentifier)
- if keyToken == nil {
- return nil, arguments.Error("Expected an identifier", nil)
- }
- if arguments.Match(TokenSymbol, "=") == nil {
- return nil, arguments.Error("Expected '='.", nil)
- }
- valueExpr, err := arguments.ParseExpression()
- if err != nil {
- return nil, err
- }
- withNode.withPairs[keyToken.Val] = valueExpr
- }
- }
-
- return withNode, nil
-}
-
-func init() {
- RegisterTag("with", tagWithParser)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/template.go b/vendor/github.com/flosch/pongo2/v6/template.go
deleted file mode 100644
index c15b9cd78..000000000
--- a/vendor/github.com/flosch/pongo2/v6/template.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
-)
-
-type TemplateWriter interface {
- io.Writer
- WriteString(string) (int, error)
-}
-
-type templateWriter struct {
- w io.Writer
-}
-
-func (tw *templateWriter) WriteString(s string) (int, error) {
- return tw.w.Write([]byte(s))
-}
-
-func (tw *templateWriter) Write(b []byte) (int, error) {
- return tw.w.Write(b)
-}
-
-type Template struct {
- set *TemplateSet
-
- // Input
- isTplString bool
- name string
- tpl string
- size int
-
- // Calculation
- tokens []*Token
- parser *Parser
-
- // first come, first serve (it's important to not override existing entries in here)
- level int
- parent *Template
- child *Template
- blocks map[string]*NodeWrapper
- exportedMacros map[string]*tagMacroNode
-
- // Output
- root *nodeDocument
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-}
-
-func newTemplateString(set *TemplateSet, tpl []byte) (*Template, error) {
- return newTemplate(set, "", true, tpl)
-}
-
-func newTemplate(set *TemplateSet, name string, isTplString bool, tpl []byte) (*Template, error) {
- strTpl := string(tpl)
-
- // Create the template
- t := &Template{
- set: set,
- isTplString: isTplString,
- name: name,
- tpl: strTpl,
- size: len(strTpl),
- blocks: make(map[string]*NodeWrapper),
- exportedMacros: make(map[string]*tagMacroNode),
- Options: newOptions(),
- }
- // Copy all settings from another Options.
- t.Options.Update(set.Options)
-
- // Tokenize it
- tokens, err := lex(name, strTpl)
- if err != nil {
- return nil, err
- }
- t.tokens = tokens
-
- // For debugging purposes, show all tokens:
- /*for i, t := range tokens {
- fmt.Printf("%3d. %s\n", i, t)
- }*/
-
- // Parse it
- err = t.parse()
- if err != nil {
- return nil, err
- }
-
- return t, nil
-}
-
-func (tpl *Template) newContextForExecution(context Context) (*Template, *ExecutionContext, error) {
- if tpl.Options.TrimBlocks || tpl.Options.LStripBlocks {
- // Issue #94 https://github.com/flosch/pongo2/issues/94
- // If an application configures pongo2 template to trim_blocks,
- // the first newline after a template tag is removed automatically (like in PHP).
- prev := &Token{
- Typ: TokenHTML,
- Val: "\n",
- }
-
- for _, t := range tpl.tokens {
- if tpl.Options.LStripBlocks {
- if prev.Typ == TokenHTML && t.Typ != TokenHTML && t.Val == "{%" {
- prev.Val = strings.TrimRight(prev.Val, "\t ")
- }
- }
-
- if tpl.Options.TrimBlocks {
- if prev.Typ != TokenHTML && t.Typ == TokenHTML && prev.Val == "%}" {
- if len(t.Val) > 0 && t.Val[0] == '\n' {
- t.Val = t.Val[1:len(t.Val)]
- }
- }
- }
-
- prev = t
- }
- }
-
- // Determine the parent to be executed (for template inheritance)
- parent := tpl
- for parent.parent != nil {
- parent = parent.parent
- }
-
- // Create context if none is given
- newContext := make(Context)
- newContext.Update(tpl.set.Globals)
-
- if context != nil {
- newContext.Update(context)
-
- if len(newContext) > 0 {
- // Check for context name syntax
- err := newContext.checkForValidIdentifiers()
- if err != nil {
- return parent, nil, err
- }
-
- // Check for clashes with macro names
- for k := range newContext {
- _, has := tpl.exportedMacros[k]
- if has {
- return parent, nil, &Error{
- Filename: tpl.name,
- Sender: "execution",
- OrigError: fmt.Errorf("context key name '%s' clashes with macro '%s'", k, k),
- }
- }
- }
- }
- }
-
- // Create operational context
- ctx := newExecutionContext(parent, newContext)
-
- return parent, ctx, nil
-}
-
-func (tpl *Template) execute(context Context, writer TemplateWriter) error {
- parent, ctx, err := tpl.newContextForExecution(context)
- if err != nil {
- return err
- }
-
- // Run the selected document
- if err := parent.root.Execute(ctx, writer); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tpl *Template) newTemplateWriterAndExecute(context Context, writer io.Writer) error {
- return tpl.execute(context, &templateWriter{w: writer})
-}
-
-func (tpl *Template) newBufferAndExecute(context Context) (*bytes.Buffer, error) {
- // Create output buffer
- // We assume that the rendered template will be 30% larger
- buffer := bytes.NewBuffer(make([]byte, 0, int(float64(tpl.size)*1.3)))
- if err := tpl.execute(context, buffer); err != nil {
- return nil, err
- }
- return buffer, nil
-}
-
-// Executes the template with the given context and writes to writer (io.Writer)
-// on success. Context can be nil. Nothing is written on error; instead the error
-// is being returned.
-func (tpl *Template) ExecuteWriter(context Context, writer io.Writer) error {
- buf, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return err
- }
- _, err = buf.WriteTo(writer)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Same as ExecuteWriter. The only difference between both functions is that
-// this function might already have written parts of the generated template in the
-// case of an execution error because there's no intermediate buffer involved for
-// performance reasons. This is handy if you need high performance template
-// generation or if you want to manage your own pool of buffers.
-func (tpl *Template) ExecuteWriterUnbuffered(context Context, writer io.Writer) error {
- return tpl.newTemplateWriterAndExecute(context, writer)
-}
-
-// Executes the template and returns the rendered template as a []byte
-func (tpl *Template) ExecuteBytes(context Context) ([]byte, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return nil, err
- }
- return buffer.Bytes(), nil
-}
-
-// Executes the template and returns the rendered template as a string
-func (tpl *Template) Execute(context Context) (string, error) {
- // Execute template
- buffer, err := tpl.newBufferAndExecute(context)
- if err != nil {
- return "", err
- }
-
- return buffer.String(), nil
-
-}
-
-func (tpl *Template) ExecuteBlocks(context Context, blocks []string) (map[string]string, error) {
- var parents []*Template
- result := make(map[string]string)
-
- parent := tpl
- for parent != nil {
- // We only want to execute the template if it has a block we want
- for _, block := range blocks {
- if _, ok := tpl.blocks[block]; ok {
- parents = append(parents, parent)
- break
- }
- }
- parent = parent.parent
- }
-
- for _, t := range parents {
- var buffer *bytes.Buffer
- var ctx *ExecutionContext
- var err error
- for _, blockName := range blocks {
- if _, ok := result[blockName]; ok {
- continue
- }
- if blockWrapper, ok := t.blocks[blockName]; ok {
- // assign the buffer if we haven't done so
- if buffer == nil {
- buffer = bytes.NewBuffer(make([]byte, 0, int(float64(t.size)*1.3)))
- }
- // assign the context if we haven't done so
- if ctx == nil {
- _, ctx, err = t.newContextForExecution(context)
- if err != nil {
- return nil, err
- }
- }
- bErr := blockWrapper.Execute(ctx, buffer)
- if bErr != nil {
- return nil, bErr
- }
- result[blockName] = buffer.String()
- buffer.Reset()
- }
- }
- // We have found all blocks
- if len(blocks) == len(result) {
- break
- }
- }
-
- return result, nil
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/template_loader.go b/vendor/github.com/flosch/pongo2/v6/template_loader.go
deleted file mode 100644
index e10cf33fb..000000000
--- a/vendor/github.com/flosch/pongo2/v6/template_loader.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package pongo2
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "io/fs"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "path/filepath"
-)
-
-// FSLoader supports the fs.FS interface for loading templates
-type FSLoader struct {
- fs fs.FS
-}
-
-func NewFSLoader(fs fs.FS) *FSLoader {
- return &FSLoader{
- fs: fs,
- }
-}
-
-func (l *FSLoader) Abs(base, name string) string {
- return filepath.Join(filepath.Dir(base), name)
-}
-
-func (l *FSLoader) Get(path string) (io.Reader, error) {
- return l.fs.Open(path)
-}
-
-// LocalFilesystemLoader represents a local filesystem loader with basic
-// BaseDirectory capabilities. The access to the local filesystem is unrestricted.
-type LocalFilesystemLoader struct {
- baseDir string
-}
-
-// MustNewLocalFileSystemLoader creates a new LocalFilesystemLoader instance
-// and panics if there's any error during instantiation. The parameters
-// are the same like NewLocalFileSystemLoader.
-func MustNewLocalFileSystemLoader(baseDir string) *LocalFilesystemLoader {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- log.Panic(err)
- }
- return fs
-}
-
-// NewLocalFileSystemLoader creates a new LocalFilesystemLoader and allows
-// templatesto be loaded from disk (unrestricted). If any base directory
-// is given (or being set using SetBaseDir), this base directory is being used
-// for path calculation in template inclusions/imports. Otherwise the path
-// is calculated based relatively to the including template's path.
-func NewLocalFileSystemLoader(baseDir string) (*LocalFilesystemLoader, error) {
- fs := &LocalFilesystemLoader{}
- if baseDir != "" {
- if err := fs.SetBaseDir(baseDir); err != nil {
- return nil, err
- }
- }
- return fs, nil
-}
-
-// SetBaseDir sets the template's base directory. This directory will
-// be used for any relative path in filters, tags and From*-functions to determine
-// your template. See the comment for NewLocalFileSystemLoader as well.
-func (fs *LocalFilesystemLoader) SetBaseDir(path string) error {
- // Make the path absolute
- if !filepath.IsAbs(path) {
- abs, err := filepath.Abs(path)
- if err != nil {
- return err
- }
- path = abs
- }
-
- // Check for existence
- fi, err := os.Stat(path)
- if err != nil {
- return err
- }
- if !fi.IsDir() {
- return fmt.Errorf("the given path '%s' is not a directory", path)
- }
-
- fs.baseDir = path
- return nil
-}
-
-// Get reads the path's content from your local filesystem.
-func (fs *LocalFilesystemLoader) Get(path string) (io.Reader, error) {
- buf, err := ioutil.ReadFile(path)
- if err != nil {
- return nil, err
- }
- return bytes.NewReader(buf), nil
-}
-
-// Abs resolves a filename relative to the base directory. Absolute paths are allowed.
-// When there's no base dir set, the absolute path to the filename
-// will be calculated based on either the provided base directory (which
-// might be a path of a template which includes another template) or
-// the current working directory.
-func (fs *LocalFilesystemLoader) Abs(base, name string) string {
- if filepath.IsAbs(name) {
- return name
- }
-
- // Our own base dir has always priority; if there's none
- // we use the path provided in base.
- var err error
- if fs.baseDir == "" {
- if base == "" {
- base, err = os.Getwd()
- if err != nil {
- panic(err)
- }
- return filepath.Join(base, name)
- }
-
- return filepath.Join(filepath.Dir(base), name)
- }
-
- return filepath.Join(fs.baseDir, name)
-}
-
-// SandboxedFilesystemLoader is still WIP.
-type SandboxedFilesystemLoader struct {
- *LocalFilesystemLoader
-}
-
-// NewSandboxedFilesystemLoader creates a new sandboxed local file system instance.
-func NewSandboxedFilesystemLoader(baseDir string) (*SandboxedFilesystemLoader, error) {
- fs, err := NewLocalFileSystemLoader(baseDir)
- if err != nil {
- return nil, err
- }
- return &SandboxedFilesystemLoader{
- LocalFilesystemLoader: fs,
- }, nil
-}
-
-// Move sandbox to a virtual fs
-
-/*
-if len(set.SandboxDirectories) > 0 {
- defer func() {
- // Remove any ".." or other crap
- resolvedPath = filepath.Clean(resolvedPath)
-
- // Make the path absolute
- absPath, err := filepath.Abs(resolvedPath)
- if err != nil {
- panic(err)
- }
- resolvedPath = absPath
-
- // Check against the sandbox directories (once one pattern matches, we're done and can allow it)
- for _, pattern := range set.SandboxDirectories {
- matched, err := filepath.Match(pattern, resolvedPath)
- if err != nil {
- panic("Wrong sandbox directory match pattern (see http://golang.org/pkg/path/filepath/#Match).")
- }
- if matched {
- // OK!
- return
- }
- }
-
- // No pattern matched, we have to log+deny the request
- set.logf("Access attempt outside of the sandbox directories (blocked): '%s'", resolvedPath)
- resolvedPath = ""
- }()
-}
-*/
-
-// HttpFilesystemLoader supports loading templates
-// from an http.FileSystem - useful for using one of several
-// file-to-code generators that packs static files into
-// a go binary (ex: https://github.com/jteeuwen/go-bindata)
-type HttpFilesystemLoader struct {
- fs http.FileSystem
- baseDir string
-}
-
-// MustNewHttpFileSystemLoader creates a new HttpFilesystemLoader instance
-// and panics if there's any error during instantiation. The parameters
-// are the same like NewHttpFilesystemLoader.
-func MustNewHttpFileSystemLoader(httpfs http.FileSystem, baseDir string) *HttpFilesystemLoader {
- fs, err := NewHttpFileSystemLoader(httpfs, baseDir)
- if err != nil {
- log.Panic(err)
- }
- return fs
-}
-
-// NewHttpFileSystemLoader creates a new HttpFileSystemLoader and allows
-// templates to be loaded from the virtual filesystem. The path
-// is calculated based relatively from the root of the http.Filesystem.
-func NewHttpFileSystemLoader(httpfs http.FileSystem, baseDir string) (*HttpFilesystemLoader, error) {
- hfs := &HttpFilesystemLoader{
- fs: httpfs,
- baseDir: baseDir,
- }
- if httpfs == nil {
- err := errors.New("httpfs cannot be nil")
- return nil, err
- }
- return hfs, nil
-}
-
-// Abs in this instance simply returns the filename, since
-// there's no potential for an unexpanded path in an http.FileSystem
-func (h *HttpFilesystemLoader) Abs(base, name string) string {
- return name
-}
-
-// Get returns an io.Reader where the template's content can be read from.
-func (h *HttpFilesystemLoader) Get(path string) (io.Reader, error) {
- fullPath := path
- if h.baseDir != "" {
- fullPath = fmt.Sprintf(
- "%s/%s",
- h.baseDir,
- fullPath,
- )
- }
-
- return h.fs.Open(fullPath)
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/template_sets.go b/vendor/github.com/flosch/pongo2/v6/template_sets.go
deleted file mode 100644
index 34b45a0cb..000000000
--- a/vendor/github.com/flosch/pongo2/v6/template_sets.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package pongo2
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "sync"
-)
-
-// TemplateLoader allows to implement a virtual file system.
-type TemplateLoader interface {
- // Abs calculates the path to a given template. Whenever a path must be resolved
- // due to an import from another template, the base equals the parent template's path.
- Abs(base, name string) string
-
- // Get returns an io.Reader where the template's content can be read from.
- Get(path string) (io.Reader, error)
-}
-
-// TemplateSet allows you to create your own group of templates with their own
-// global context (which is shared among all members of the set) and their own
-// configuration.
-// It's useful for a separation of different kind of templates
-// (e. g. web templates vs. mail templates).
-type TemplateSet struct {
- name string
- loaders []TemplateLoader
-
- // Globals will be provided to all templates created within this template set
- Globals Context
-
- // If debug is true (default false), ExecutionContext.Logf() will work and output
- // to STDOUT. Furthermore, FromCache() won't cache the templates.
- // Make sure to synchronize the access to it in case you're changing this
- // variable during program execution (and template compilation/execution).
- Debug bool
-
- // Options allow you to change the behavior of template-engine.
- // You can change the options before calling the Execute method.
- Options *Options
-
- // Sandbox features
- // - Disallow access to specific tags and/or filters (using BanTag() and BanFilter())
- //
- // For efficiency reasons you can ban tags/filters only *before* you have
- // added your first template to the set (restrictions are statically checked).
- // After you added one, it's not possible anymore (for your personal security).
- firstTemplateCreated bool
- bannedTags map[string]bool
- bannedFilters map[string]bool
-
- // Template cache (for FromCache())
- templateCache map[string]*Template
- templateCacheMutex sync.Mutex
-}
-
-// NewSet can be used to create sets with different kind of templates
-// (e. g. web from mail templates), with different globals or
-// other configurations.
-func NewSet(name string, loaders ...TemplateLoader) *TemplateSet {
- if len(loaders) == 0 {
- panic(fmt.Errorf("at least one template loader must be specified"))
- }
-
- return &TemplateSet{
- name: name,
- loaders: loaders,
- Globals: make(Context),
- bannedTags: make(map[string]bool),
- bannedFilters: make(map[string]bool),
- templateCache: make(map[string]*Template),
- Options: newOptions(),
- }
-}
-
-func (set *TemplateSet) AddLoader(loaders ...TemplateLoader) {
- set.loaders = append(set.loaders, loaders...)
-}
-
-func (set *TemplateSet) resolveFilename(tpl *Template, path string) string {
- return set.resolveFilenameForLoader(set.loaders[0], tpl, path)
-}
-
-func (set *TemplateSet) resolveFilenameForLoader(loader TemplateLoader, tpl *Template, path string) string {
- name := ""
- if tpl != nil && tpl.isTplString {
- return path
- }
- if tpl != nil {
- name = tpl.name
- }
-
- return loader.Abs(name, path)
-}
-
-// BanTag bans a specific tag for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanTag(name string) error {
- _, has := tags[name]
- if !has {
- return fmt.Errorf("tag '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any tags after you've added your first template to your template set")
- }
- _, has = set.bannedTags[name]
- if has {
- return fmt.Errorf("tag '%s' is already banned", name)
- }
- set.bannedTags[name] = true
-
- return nil
-}
-
-// BanFilter bans a specific filter for this template set. See more in the documentation for TemplateSet.
-func (set *TemplateSet) BanFilter(name string) error {
- _, has := filters[name]
- if !has {
- return fmt.Errorf("filter '%s' not found", name)
- }
- if set.firstTemplateCreated {
- return errors.New("you cannot ban any filters after you've added your first template to your template set")
- }
- _, has = set.bannedFilters[name]
- if has {
- return fmt.Errorf("filter '%s' is already banned", name)
- }
- set.bannedFilters[name] = true
-
- return nil
-}
-
-func (set *TemplateSet) resolveTemplate(tpl *Template, path string) (name string, loader TemplateLoader, fd io.Reader, err error) {
- // iterate over loaders until we appear to have a valid template
- for _, loader = range set.loaders {
- name = set.resolveFilenameForLoader(loader, tpl, path)
- fd, err = loader.Get(name)
- if err == nil {
- return
- }
- }
-
- return path, nil, nil, fmt.Errorf("unable to resolve template")
-}
-
-// CleanCache cleans the template cache. If filenames is not empty,
-// it will remove the template caches of those filenames.
-// Or it will empty the whole template cache. It is thread-safe.
-func (set *TemplateSet) CleanCache(filenames ...string) {
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- if len(filenames) == 0 {
- set.templateCache = make(map[string]*Template, len(set.templateCache))
- }
-
- for _, filename := range filenames {
- delete(set.templateCache, set.resolveFilename(nil, filename))
- }
-}
-
-// FromCache is a convenient method to cache templates. It is thread-safe
-// and will only compile the template associated with a filename once.
-// If TemplateSet.Debug is true (for example during development phase),
-// FromCache() will not cache the template and instead recompile it on any
-// call (to make changes to a template live instantaneously).
-func (set *TemplateSet) FromCache(filename string) (*Template, error) {
- if set.Debug {
- // Recompile on any request
- return set.FromFile(filename)
- }
- // Cache the template
- cleanedFilename := set.resolveFilename(nil, filename)
-
- set.templateCacheMutex.Lock()
- defer set.templateCacheMutex.Unlock()
-
- tpl, has := set.templateCache[cleanedFilename]
-
- // Cache miss
- if !has {
- tpl, err := set.FromFile(cleanedFilename)
- if err != nil {
- return nil, err
- }
- set.templateCache[cleanedFilename] = tpl
- return tpl, nil
- }
-
- // Cache hit
- return tpl, nil
-}
-
-// FromString loads a template from string and returns a Template instance.
-func (set *TemplateSet) FromString(tpl string) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, []byte(tpl))
-}
-
-// FromBytes loads a template from bytes and returns a Template instance.
-func (set *TemplateSet) FromBytes(tpl []byte) (*Template, error) {
- set.firstTemplateCreated = true
-
- return newTemplateString(set, tpl)
-}
-
-// FromFile loads a template from a filename and returns a Template instance.
-func (set *TemplateSet) FromFile(filename string) (*Template, error) {
- set.firstTemplateCreated = true
-
- _, _, fd, err := set.resolveTemplate(nil, filename)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
- buf, err := ioutil.ReadAll(fd)
- if err != nil {
- return nil, &Error{
- Filename: filename,
- Sender: "fromfile",
- OrigError: err,
- }
- }
-
- return newTemplate(set, filename, false, buf)
-}
-
-// RenderTemplateString is a shortcut and renders a template string directly.
-func (set *TemplateSet) RenderTemplateString(s string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromString(s))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateBytes is a shortcut and renders template bytes directly.
-func (set *TemplateSet) RenderTemplateBytes(b []byte, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromBytes(b))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-// RenderTemplateFile is a shortcut and renders a template file directly.
-func (set *TemplateSet) RenderTemplateFile(fn string, ctx Context) (string, error) {
- set.firstTemplateCreated = true
-
- tpl := Must(set.FromFile(fn))
- result, err := tpl.Execute(ctx)
- if err != nil {
- return "", err
- }
- return result, nil
-}
-
-func (set *TemplateSet) logf(format string, args ...any) {
- if set.Debug {
- logger.Printf(fmt.Sprintf("[template set: %s] %s", set.name, format), args...)
- }
-}
-
-// Logging function (internally used)
-func logf(format string, items ...any) {
- if debug {
- logger.Printf(format, items...)
- }
-}
-
-var (
- debug bool // internal debugging
- logger = log.New(os.Stdout, "[pongo2] ", log.LstdFlags|log.Lshortfile)
-
- // DefaultLoader allows the default un-sandboxed access to the local file
- // system and is being used by the DefaultSet.
- DefaultLoader = MustNewLocalFileSystemLoader("")
-
- // DefaultSet is a set created for you for convinience reasons.
- DefaultSet = NewSet("default", DefaultLoader)
-
- // Methods on the default set
- FromString = DefaultSet.FromString
- FromBytes = DefaultSet.FromBytes
- FromFile = DefaultSet.FromFile
- FromCache = DefaultSet.FromCache
- RenderTemplateString = DefaultSet.RenderTemplateString
- RenderTemplateFile = DefaultSet.RenderTemplateFile
-
- // Globals for the default set
- Globals = DefaultSet.Globals
-)
diff --git a/vendor/github.com/flosch/pongo2/v6/value.go b/vendor/github.com/flosch/pongo2/v6/value.go
deleted file mode 100644
index f62df808d..000000000
--- a/vendor/github.com/flosch/pongo2/v6/value.go
+++ /dev/null
@@ -1,540 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type Value struct {
- val reflect.Value
- safe bool // used to indicate whether a Value needs explicit escaping in the template
-}
-
-// AsValue converts any given value to a pongo2.Value
-// Usually being used within own functions passed to a template
-// through a Context or within filter functions.
-//
-// Example:
-// AsValue("my string")
-func AsValue(i any) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- }
-}
-
-// AsSafeValue works like AsValue, but does not apply the 'escape' filter.
-func AsSafeValue(i any) *Value {
- return &Value{
- val: reflect.ValueOf(i),
- safe: true,
- }
-}
-
-func (v *Value) getResolvedValue() reflect.Value {
- if v.val.IsValid() && v.val.Kind() == reflect.Ptr {
- return v.val.Elem()
- }
- return v.val
-}
-
-// IsString checks whether the underlying value is a string
-func (v *Value) IsString() bool {
- return v.getResolvedValue().Kind() == reflect.String
-}
-
-// IsBool checks whether the underlying value is a bool
-func (v *Value) IsBool() bool {
- return v.getResolvedValue().Kind() == reflect.Bool
-}
-
-// IsFloat checks whether the underlying value is a float
-func (v *Value) IsFloat() bool {
- return v.getResolvedValue().Kind() == reflect.Float32 ||
- v.getResolvedValue().Kind() == reflect.Float64
-}
-
-// IsInteger checks whether the underlying value is an integer
-func (v *Value) IsInteger() bool {
- return v.getResolvedValue().Kind() == reflect.Int ||
- v.getResolvedValue().Kind() == reflect.Int8 ||
- v.getResolvedValue().Kind() == reflect.Int16 ||
- v.getResolvedValue().Kind() == reflect.Int32 ||
- v.getResolvedValue().Kind() == reflect.Int64 ||
- v.getResolvedValue().Kind() == reflect.Uint ||
- v.getResolvedValue().Kind() == reflect.Uint8 ||
- v.getResolvedValue().Kind() == reflect.Uint16 ||
- v.getResolvedValue().Kind() == reflect.Uint32 ||
- v.getResolvedValue().Kind() == reflect.Uint64
-}
-
-// IsNumber checks whether the underlying value is either an integer
-// or a float.
-func (v *Value) IsNumber() bool {
- return v.IsInteger() || v.IsFloat()
-}
-
-// IsTime checks whether the underlying value is a time.Time.
-func (v *Value) IsTime() bool {
- _, ok := v.Interface().(time.Time)
- return ok
-}
-
-// IsNil checks whether the underlying value is NIL
-func (v *Value) IsNil() bool {
- // fmt.Printf("%+v\n", v.getResolvedValue().Type().String())
- return !v.getResolvedValue().IsValid()
-}
-
-// String returns a string for the underlying value. If this value is not
-// of type string, pongo2 tries to convert it. Currently the following
-// types for underlying values are supported:
-//
-// 1. string
-// 2. int/uint (any size)
-// 3. float (any precision)
-// 4. bool
-// 5. time.Time
-// 6. String() will be called on the underlying value if provided
-//
-// NIL values will lead to an empty string. Unsupported types are leading
-// to their respective type name.
-func (v *Value) String() string {
- if v.IsNil() {
- return ""
- }
-
- if t, ok := v.Interface().(fmt.Stringer); ok {
- return t.String()
- }
-
- switch v.getResolvedValue().Kind() {
- case reflect.String:
- return v.getResolvedValue().String()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(v.getResolvedValue().Int(), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return strconv.FormatUint(v.getResolvedValue().Uint(), 10)
- case reflect.Float32, reflect.Float64:
- return fmt.Sprintf("%f", v.getResolvedValue().Float())
- case reflect.Bool:
- if v.Bool() {
- return "True"
- }
- return "False"
- }
-
- logf("Value.String() not implemented for type: %s\n", v.getResolvedValue().Kind().String())
- return v.getResolvedValue().String()
-}
-
-// Integer returns the underlying value as an integer (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.
-func (v *Value) Integer() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return int(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return int(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return int(v.getResolvedValue().Float())
- case reflect.String:
- // Try to convert from string to int (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0
- }
- return int(f)
- default:
- logf("Value.Integer() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Float returns the underlying value as a float (converts the underlying
-// value, if necessary). If it's not possible to convert the underlying value,
-// it will return 0.0.
-func (v *Value) Float() float64 {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.getResolvedValue().Int())
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return float64(v.getResolvedValue().Uint())
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float()
- case reflect.String:
- // Try to convert from string to float64 (base 10)
- f, err := strconv.ParseFloat(v.getResolvedValue().String(), 64)
- if err != nil {
- return 0.0
- }
- return f
- default:
- logf("Value.Float() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0.0
- }
-}
-
-// Bool returns the underlying value as bool. If the value is not bool, false
-// will always be returned. If you're looking for true/false-evaluation of the
-// underlying value, have a look on the IsTrue()-function.
-func (v *Value) Bool() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- default:
- logf("Value.Bool() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Time returns the underlying value as time.Time.
-// If the underlying value is not a time.Time, it returns the zero value of time.Time.
-func (v *Value) Time() time.Time {
- tm, ok := v.Interface().(time.Time)
- if ok {
- return tm
- }
- return time.Time{}
-}
-
-// IsTrue tries to evaluate the underlying value the Pythonic-way:
-//
-// Returns TRUE in one the following cases:
-//
-// * int != 0
-// * uint != 0
-// * float != 0.0
-// * len(array/chan/map/slice/string) > 0
-// * bool == true
-// * underlying value is a struct
-//
-// Otherwise returns always FALSE.
-func (v *Value) IsTrue() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.getResolvedValue().Int() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return v.getResolvedValue().Uint() != 0
- case reflect.Float32, reflect.Float64:
- return v.getResolvedValue().Float() != 0
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.getResolvedValue().Len() > 0
- case reflect.Bool:
- return v.getResolvedValue().Bool()
- case reflect.Struct:
- return true // struct instance is always true
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// Negate tries to negate the underlying value. It's mainly used for
-// the NOT-operator and in conjunction with a call to
-// return_value.IsTrue() afterwards.
-//
-// Example:
-// AsValue(1).Negate().IsTrue() == false
-func (v *Value) Negate() *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- if v.Integer() != 0 {
- return AsValue(0)
- }
- return AsValue(1)
- case reflect.Float32, reflect.Float64:
- if v.Float() != 0.0 {
- return AsValue(float64(0.0))
- }
- return AsValue(float64(1.1))
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return AsValue(v.getResolvedValue().Len() == 0)
- case reflect.Bool:
- return AsValue(!v.getResolvedValue().Bool())
- case reflect.Struct:
- return AsValue(false)
- default:
- logf("Value.IsTrue() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue(true)
- }
-}
-
-// Len returns the length for an array, chan, map, slice or string.
-// Otherwise it will return 0.
-func (v *Value) Len() int {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
- return v.getResolvedValue().Len()
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return len(runes)
- default:
- logf("Value.Len() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return 0
- }
-}
-
-// Slice slices an array, slice or string. Otherwise it will
-// return an empty []int.
-func (v *Value) Slice(i, j int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- return AsValue(v.getResolvedValue().Slice(i, j).Interface())
- case reflect.String:
- runes := []rune(v.getResolvedValue().String())
- return AsValue(string(runes[i:j]))
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Index gets the i-th item of an array, slice or string. Otherwise
-// it will return NIL.
-func (v *Value) Index(i int) *Value {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice:
- if i >= v.Len() {
- return AsValue(nil)
- }
- return AsValue(v.getResolvedValue().Index(i).Interface())
- case reflect.String:
- // return AsValue(v.getResolvedValue().Slice(i, i+1).Interface())
- s := v.getResolvedValue().String()
- runes := []rune(s)
- if i < len(runes) {
- return AsValue(string(runes[i]))
- }
- return AsValue("")
- default:
- logf("Value.Slice() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return AsValue([]int{})
- }
-}
-
-// Contains checks whether the underlying value (which must be of type struct, map,
-// string, array or slice) contains of another Value (e. g. used to check
-// whether a struct contains of a specific field or a map contains a specific key).
-//
-// Example:
-// AsValue("Hello, World!").Contains(AsValue("World")) == true
-func (v *Value) Contains(other *Value) bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Struct:
- fieldValue := v.getResolvedValue().FieldByName(other.String())
- return fieldValue.IsValid()
- case reflect.Map:
- var mapValue reflect.Value
- switch other.Interface().(type) {
- case int:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- case string:
- mapValue = v.getResolvedValue().MapIndex(other.getResolvedValue())
- default:
- logf("Value.Contains() does not support lookup type '%s'\n", other.getResolvedValue().Kind().String())
- return false
- }
-
- return mapValue.IsValid()
- case reflect.String:
- return strings.Contains(v.getResolvedValue().String(), other.String())
-
- case reflect.Slice, reflect.Array:
- for i := 0; i < v.getResolvedValue().Len(); i++ {
- item := v.getResolvedValue().Index(i)
- if other.EqualValueTo(AsValue(item.Interface())) {
- return true
- }
- }
- return false
-
- default:
- logf("Value.Contains() not available for type: %s\n", v.getResolvedValue().Kind().String())
- return false
- }
-}
-
-// CanSlice checks whether the underlying value is of type array, slice or string.
-// You normally would use CanSlice() before using the Slice() operation.
-func (v *Value) CanSlice() bool {
- switch v.getResolvedValue().Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- return true
- }
- return false
-}
-
-// Iterate iterates over a map, array, slice or a string. It calls the
-// function's first argument for every value with the following arguments:
-//
-// idx current 0-index
-// count total amount of items
-// key *Value for the key or item
-// value *Value (only for maps, the respective value for a specific key)
-//
-// If the underlying value has no items or is not one of the types above,
-// the empty function (function's second argument) will be called.
-func (v *Value) Iterate(fn func(idx, count int, key, value *Value) bool, empty func()) {
- v.IterateOrder(fn, empty, false, false)
-}
-
-// IterateOrder behaves like Value.Iterate, but can iterate through an array/slice/string in reverse. Does
-// not affect the iteration through a map because maps don't have any particular order.
-// However, you can force an order using the `sorted` keyword (and even use `reversed sorted`).
-func (v *Value) IterateOrder(fn func(idx, count int, key, value *Value) bool, empty func(), reverse bool, sorted bool) {
- switch v.getResolvedValue().Kind() {
- case reflect.Map:
- keys := sortedKeys(v.getResolvedValue().MapKeys())
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(keys))
- } else {
- sort.Sort(keys)
- }
- }
- keyLen := len(keys)
- for idx, key := range keys {
- value := v.getResolvedValue().MapIndex(key)
- if !fn(idx, keyLen, &Value{val: key}, &Value{val: value}) {
- return
- }
- }
- if keyLen == 0 {
- empty()
- }
- return // done
- case reflect.Array, reflect.Slice:
- var items valuesList
-
- itemCount := v.getResolvedValue().Len()
- for i := 0; i < itemCount; i++ {
- items = append(items, &Value{val: v.getResolvedValue().Index(i)})
- }
-
- if sorted {
- if reverse {
- sort.Sort(sort.Reverse(items))
- } else {
- sort.Sort(items)
- }
- } else {
- if reverse {
- for i := 0; i < itemCount/2; i++ {
- items[i], items[itemCount-1-i] = items[itemCount-1-i], items[i]
- }
- }
- }
-
- if len(items) > 0 {
- for idx, item := range items {
- if !fn(idx, itemCount, item, nil) {
- return
- }
- }
- } else {
- empty()
- }
- return // done
- case reflect.String:
- if sorted {
- // TODO(flosch): Handle sorted
- panic("TODO: handle sort for type string")
- }
-
- // TODO(flosch): Not utf8-compatible (utf8-decoding necessary)
- charCount := v.getResolvedValue().Len()
- if charCount > 0 {
- if reverse {
- for i := charCount - 1; i >= 0; i-- {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- } else {
- for i := 0; i < charCount; i++ {
- if !fn(i, charCount, &Value{val: v.getResolvedValue().Slice(i, i+1)}, nil) {
- return
- }
- }
- }
- } else {
- empty()
- }
- return // done
- default:
- logf("Value.Iterate() not available for type: %s\n", v.getResolvedValue().Kind().String())
- }
- empty()
-}
-
-// Interface gives you access to the underlying value.
-func (v *Value) Interface() any {
- if v.val.IsValid() {
- return v.val.Interface()
- }
- return nil
-}
-
-// EqualValueTo checks whether two values are containing the same value or object.
-func (v *Value) EqualValueTo(other *Value) bool {
- // comparison of uint with int fails using .Interface()-comparison (see issue #64)
- if v.IsInteger() && other.IsInteger() {
- return v.Integer() == other.Integer()
- }
- if v.IsTime() && other.IsTime() {
- return v.Time().Equal(other.Time())
- }
- return v.Interface() == other.Interface()
-}
-
-type sortedKeys []reflect.Value
-
-func (sk sortedKeys) Len() int {
- return len(sk)
-}
-
-func (sk sortedKeys) Less(i, j int) bool {
- vi := &Value{val: sk[i]}
- vj := &Value{val: sk[j]}
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (sk sortedKeys) Swap(i, j int) {
- sk[i], sk[j] = sk[j], sk[i]
-}
-
-type valuesList []*Value
-
-func (vl valuesList) Len() int {
- return len(vl)
-}
-
-func (vl valuesList) Less(i, j int) bool {
- vi := vl[i]
- vj := vl[j]
- switch {
- case vi.IsInteger() && vj.IsInteger():
- return vi.Integer() < vj.Integer()
- case vi.IsFloat() && vj.IsFloat():
- return vi.Float() < vj.Float()
- default:
- return vi.String() < vj.String()
- }
-}
-
-func (vl valuesList) Swap(i, j int) {
- vl[i], vl[j] = vl[j], vl[i]
-}
diff --git a/vendor/github.com/flosch/pongo2/v6/variable.go b/vendor/github.com/flosch/pongo2/v6/variable.go
deleted file mode 100644
index 31e420e4f..000000000
--- a/vendor/github.com/flosch/pongo2/v6/variable.go
+++ /dev/null
@@ -1,761 +0,0 @@
-package pongo2
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
-)
-
-const (
- varTypeInt = iota
- varTypeIdent
- varTypeSubscript
- varTypeNil
-)
-
-var (
- typeOfValuePtr = reflect.TypeOf(new(Value))
- typeOfExecCtxPtr = reflect.TypeOf(new(ExecutionContext))
-)
-
-type variablePart struct {
- typ int
- s string
- i int
- subscript IEvaluator
- isNil bool
-
- isFunctionCall bool
- callingArgs []functionCallArgument // needed for a function call, represents all argument nodes (INode supports nested function calls)
-}
-
-type functionCallArgument interface {
- Evaluate(*ExecutionContext) (*Value, *Error)
-}
-
-// TODO: Add location tokens
-type stringResolver struct {
- locationToken *Token
- val string
-}
-
-type intResolver struct {
- locationToken *Token
- val int
-}
-
-type floatResolver struct {
- locationToken *Token
- val float64
-}
-
-type boolResolver struct {
- locationToken *Token
- val bool
-}
-
-type variableResolver struct {
- locationToken *Token
-
- parts []*variablePart
-}
-
-type nodeFilteredVariable struct {
- locationToken *Token
-
- resolver IEvaluator
- filterChain []*filterCall
-}
-
-type nodeVariable struct {
- locationToken *Token
- expr IEvaluator
-}
-
-type executionCtxEval struct{}
-
-func (v *nodeFilteredVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := v.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (vr *variableResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := vr.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (s *stringResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := s.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (i *intResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := i.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (f *floatResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := f.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (b *boolResolver) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := b.Evaluate(ctx)
- if err != nil {
- return err
- }
- writer.WriteString(value.String())
- return nil
-}
-
-func (v *nodeFilteredVariable) GetPositionToken() *Token {
- return v.locationToken
-}
-
-func (vr *variableResolver) GetPositionToken() *Token {
- return vr.locationToken
-}
-
-func (s *stringResolver) GetPositionToken() *Token {
- return s.locationToken
-}
-
-func (i *intResolver) GetPositionToken() *Token {
- return i.locationToken
-}
-
-func (f *floatResolver) GetPositionToken() *Token {
- return f.locationToken
-}
-
-func (b *boolResolver) GetPositionToken() *Token {
- return b.locationToken
-}
-
-func (s *stringResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(s.val), nil
-}
-
-func (i *intResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(i.val), nil
-}
-
-func (f *floatResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(f.val), nil
-}
-
-func (b *boolResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(b.val), nil
-}
-
-func (s *stringResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (i *intResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (f *floatResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (b *boolResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (nv *nodeVariable) FilterApplied(name string) bool {
- return nv.expr.FilterApplied(name)
-}
-
-func (nv *nodeVariable) Execute(ctx *ExecutionContext, writer TemplateWriter) *Error {
- value, err := nv.expr.Evaluate(ctx)
- if err != nil {
- return err
- }
-
- if !nv.expr.FilterApplied("safe") && !value.safe && value.IsString() && ctx.Autoescape {
- // apply escape filter
- value, err = filters["escape"](value, nil)
- if err != nil {
- return err
- }
- }
-
- writer.WriteString(value.String())
- return nil
-}
-
-func (executionCtxEval) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- return AsValue(ctx), nil
-}
-
-func (vr *variableResolver) FilterApplied(name string) bool {
- return false
-}
-
-func (vr *variableResolver) String() string {
- parts := make([]string, 0, len(vr.parts))
- for _, p := range vr.parts {
- switch p.typ {
- case varTypeInt:
- parts = append(parts, strconv.Itoa(p.i))
- case varTypeIdent:
- parts = append(parts, p.s)
- default:
- panic("unimplemented")
- }
- }
- return strings.Join(parts, ".")
-}
-
-func (vr *variableResolver) resolve(ctx *ExecutionContext) (*Value, error) {
- var current reflect.Value
- var isSafe bool
-
- for idx, part := range vr.parts {
- if idx == 0 {
- // We're looking up the first part of the variable.
- // First we're having a look in our private
- // context (e. g. information provided by tags, like the forloop)
- val, inPrivate := ctx.Private[vr.parts[0].s]
- if !inPrivate {
- // Nothing found? Then have a final lookup in the public context
- val = ctx.Public[vr.parts[0].s]
- }
- current = reflect.ValueOf(val) // Get the initial value
- } else {
- // Next parts, resolve it from current
-
- // Before resolving the pointer, let's see if we have a method to call
- // Problem with resolving the pointer is we're changing the receiver
- isFunc := false
- if part.typ == varTypeIdent {
- funcValue := current.MethodByName(part.s)
- if funcValue.IsValid() {
- current = funcValue
- isFunc = true
- }
- }
-
- if !isFunc {
- // If current a pointer, resolve it
- if current.Kind() == reflect.Ptr {
- current = current.Elem()
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
- }
-
- // Look up which part must be called now
- switch part.typ {
- case varTypeInt:
- // Calling an index is only possible for:
- // * slices/arrays/strings
- switch current.Kind() {
- case reflect.String, reflect.Array, reflect.Slice:
- if part.i >= 0 && current.Len() > part.i {
- current = current.Index(part.i)
- } else {
- // In Django, exceeding the length of a list is just empty.
- return AsValue(nil), nil
- }
- default:
- return nil, fmt.Errorf("can't access an index on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- case varTypeIdent:
- // debugging:
- // fmt.Printf("now = %s (kind: %s)\n", part.s, current.Kind().String())
-
- // Calling a field or key
- switch current.Kind() {
- case reflect.Struct:
- current = current.FieldByName(part.s)
- case reflect.Map:
- current = current.MapIndex(reflect.ValueOf(part.s))
- default:
- return nil, fmt.Errorf("can't access a field by name on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- case varTypeSubscript:
- // Calling an index is only possible for:
- // * slices/arrays/strings
- switch current.Kind() {
- case reflect.String, reflect.Array, reflect.Slice:
- sv, err := part.subscript.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- si := sv.Integer()
- if si >= 0 && current.Len() > si {
- current = current.Index(si)
- } else {
- // In Django, exceeding the length of a list is just empty.
- return AsValue(nil), nil
- }
- // Calling a field or key
- case reflect.Struct:
- sv, err := part.subscript.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- current = current.FieldByName(sv.String())
- case reflect.Map:
- sv, err := part.subscript.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
- if sv.val.Type().AssignableTo(current.Type().Key()) {
- current = current.MapIndex(sv.val)
- } else {
- return AsValue(nil), nil
- }
- default:
- return nil, fmt.Errorf("can't access an index on type %s (variable %s)",
- current.Kind().String(), vr.String())
- }
- default:
- panic("unimplemented")
- }
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (anymore)
- return AsValue(nil), nil
- }
-
- // If current is a reflect.ValueOf(pongo2.Value), then unpack it
- // Happens in function calls (as a return value) or by injecting
- // into the execution context (e.g. in a for-loop)
- if current.Type() == typeOfValuePtr {
- tmpValue := current.Interface().(*Value)
- current = tmpValue.val
- isSafe = tmpValue.safe
- }
-
- // Check whether this is an interface and resolve it where required
- if current.Kind() == reflect.Interface {
- current = reflect.ValueOf(current.Interface())
- }
-
- // Check if the part is a function call
- if part.isFunctionCall || current.Kind() == reflect.Func {
- // Check for callable
- if current.Kind() != reflect.Func {
- return nil, fmt.Errorf("'%s' is not a function (it is %s)", vr.String(), current.Kind().String())
- }
-
- // Check for correct function syntax and types
- // func(*Value, ...) *Value
- t := current.Type()
- currArgs := part.callingArgs
-
- // If an implicit ExecCtx is needed
- if t.NumIn() > 0 && t.In(0) == typeOfExecCtxPtr {
- currArgs = append([]functionCallArgument{executionCtxEval{}}, currArgs...)
- }
-
- // Input arguments
- if len(currArgs) != t.NumIn() && !(len(currArgs) >= t.NumIn()-1 && t.IsVariadic()) {
- return nil,
- fmt.Errorf("function input argument count (%d) of '%s' must be equal to the calling argument count (%d)",
- t.NumIn(), vr.String(), len(currArgs))
- }
-
- // Output arguments
- if t.NumOut() != 1 && t.NumOut() != 2 {
- return nil, fmt.Errorf("'%s' must have exactly 1 or 2 output arguments, the second argument must be of type error", vr.String())
- }
-
- // Evaluate all parameters
- var parameters []reflect.Value
-
- numArgs := t.NumIn()
- isVariadic := t.IsVariadic()
- var fnArg reflect.Type
-
- for idx, arg := range currArgs {
- pv, err := arg.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- if isVariadic {
- if idx >= t.NumIn()-1 {
- fnArg = t.In(numArgs - 1).Elem()
- } else {
- fnArg = t.In(idx)
- }
- } else {
- fnArg = t.In(idx)
- }
-
- if fnArg != typeOfValuePtr {
- // Function's argument is not a *pongo2.Value, then we have to check whether input argument is of the same type as the function's argument
- if !isVariadic {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("function input argument %d of '%s' must be of type %s or *pongo2.Value (not %T)",
- idx, vr.String(), fnArg.String(), pv.Interface())
- }
- } else {
- if fnArg != reflect.TypeOf(pv.Interface()) && fnArg.Kind() != reflect.Interface {
- return nil, fmt.Errorf("function variadic input argument of '%s' must be of type %s or *pongo2.Value (not %T)",
- vr.String(), fnArg.String(), pv.Interface())
- }
- }
-
- if pv.IsNil() {
- // Workaround to present an interface nil as reflect.Value
- var empty any = nil
- parameters = append(parameters, reflect.ValueOf(&empty).Elem())
- } else {
- parameters = append(parameters, reflect.ValueOf(pv.Interface()))
- }
- } else {
- // Function's argument is a *pongo2.Value
- parameters = append(parameters, reflect.ValueOf(pv))
- }
- }
-
- // Check if any of the values are invalid
- for _, p := range parameters {
- if p.Kind() == reflect.Invalid {
- return nil, fmt.Errorf("calling a function using an invalid parameter")
- }
- }
-
- // Call it and get first return parameter back
- values := current.Call(parameters)
- rv := values[0]
- if t.NumOut() == 2 {
- e := values[1].Interface()
- if e != nil {
- err, ok := e.(error)
- if !ok {
- return nil, fmt.Errorf("the second return value is not an error")
- }
- if err != nil {
- return nil, err
- }
- }
- }
-
- if rv.Type() != typeOfValuePtr {
- current = reflect.ValueOf(rv.Interface())
- } else {
- // Return the function call value
- current = rv.Interface().(*Value).val
- isSafe = rv.Interface().(*Value).safe
- }
- }
-
- if !current.IsValid() {
- // Value is not valid (e. g. NIL value)
- return AsValue(nil), nil
- }
- }
-
- return &Value{val: current, safe: isSafe}, nil
-}
-
-func (vr *variableResolver) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := vr.resolve(ctx)
- if err != nil {
- return AsValue(nil), ctx.Error(err.Error(), vr.locationToken)
- }
- return value, nil
-}
-
-func (v *nodeFilteredVariable) FilterApplied(name string) bool {
- for _, filter := range v.filterChain {
- if filter.name == name {
- return true
- }
- }
- return false
-}
-
-func (v *nodeFilteredVariable) Evaluate(ctx *ExecutionContext) (*Value, *Error) {
- value, err := v.resolver.Evaluate(ctx)
- if err != nil {
- return nil, err
- }
-
- for _, filter := range v.filterChain {
- value, err = filter.Execute(value, ctx)
- if err != nil {
- return nil, err
- }
- }
-
- return value, nil
-}
-
-// IDENT | IDENT.(IDENT|NUMBER)... | IDENT[expr]...
-func (p *Parser) parseVariableOrLiteral() (IEvaluator, *Error) {
- t := p.Current()
-
- if t == nil {
- return nil, p.Error("Unexpected EOF, expected a number, string, keyword or identifier.", p.lastToken)
- }
-
- // Is first part a number or a string, there's nothing to resolve (because there's only to return the value then)
- switch t.Typ {
- case TokenNumber:
- p.Consume()
-
- // One exception to the rule that we don't have float64 literals is at the beginning
- // of an expression (or a variable name). Since we know we started with an integer
- // which can't obviously be a variable name, we can check whether the first number
- // is followed by dot (and then a number again). If so we're converting it to a float64.
-
- if p.Match(TokenSymbol, ".") != nil {
- // float64
- t2 := p.MatchType(TokenNumber)
- if t2 == nil {
- return nil, p.Error("Expected a number after the '.'.", nil)
- }
- f, err := strconv.ParseFloat(fmt.Sprintf("%s.%s", t.Val, t2.Val), 64)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- fr := &floatResolver{
- locationToken: t,
- val: f,
- }
- return fr, nil
- }
- i, err := strconv.Atoi(t.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t)
- }
- nr := &intResolver{
- locationToken: t,
- val: i,
- }
- return nr, nil
-
- case TokenString:
- p.Consume()
- sr := &stringResolver{
- locationToken: t,
- val: t.Val,
- }
- return sr, nil
- case TokenKeyword:
- p.Consume()
- switch t.Val {
- case "true":
- br := &boolResolver{
- locationToken: t,
- val: true,
- }
- return br, nil
- case "false":
- br := &boolResolver{
- locationToken: t,
- val: false,
- }
- return br, nil
- default:
- return nil, p.Error("This keyword is not allowed here.", nil)
- }
- }
-
- resolver := &variableResolver{
- locationToken: t,
- }
-
- // First part of a variable MUST be an identifier
- if t.Typ != TokenIdentifier {
- return nil, p.Error("Expected either a number, string, keyword or identifier.", t)
- }
-
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t.Val,
- })
-
- p.Consume() // we consumed the first identifier of the variable name
-
-variableLoop:
- for p.Remaining() > 0 {
- t = p.Current()
-
- if p.Match(TokenSymbol, ".") != nil {
- // Next variable part (can be either NUMBER or IDENT)
- t2 := p.Current()
- if t2 != nil {
- switch t2.Typ {
- case TokenIdentifier:
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeIdent,
- s: t2.Val,
- })
- p.Consume() // consume: IDENT
- continue variableLoop
- case TokenNumber:
- i, err := strconv.Atoi(t2.Val)
- if err != nil {
- return nil, p.Error(err.Error(), t2)
- }
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeInt,
- i: i,
- })
- p.Consume() // consume: NUMBER
- continue variableLoop
- case TokenNil:
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeNil,
- isNil: true,
- })
- p.Consume() // consume: NIL
- continue variableLoop
- default:
- return nil, p.Error("This token is not allowed within a variable name.", t2)
- }
- } else {
- // EOF
- return nil, p.Error("Unexpected EOF, expected either IDENTIFIER or NUMBER after DOT.",
- p.lastToken)
- }
- } else if p.Match(TokenSymbol, "[") != nil {
- // Variable subscript
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpected EOF, expected subscript subscript.", p.lastToken)
- }
- exprSubscript, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- resolver.parts = append(resolver.parts, &variablePart{
- typ: varTypeSubscript,
- subscript: exprSubscript,
- })
- if p.Match(TokenSymbol, "]") == nil {
- return nil, p.Error("Missing closing bracket after subscript argument.", nil)
- }
- } else if p.Match(TokenSymbol, "(") != nil {
- // Function call
- // FunctionName '(' Comma-separated list of expressions ')'
- part := resolver.parts[len(resolver.parts)-1]
- part.isFunctionCall = true
- argumentLoop:
- for {
- if p.Remaining() == 0 {
- return nil, p.Error("Unexpected EOF, expected function call argument list.", p.lastToken)
- }
-
- if p.Peek(TokenSymbol, ")") == nil {
- // No closing bracket, so we're parsing an expression
- exprArg, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- part.callingArgs = append(part.callingArgs, exprArg)
-
- if p.Match(TokenSymbol, ")") != nil {
- // If there's a closing bracket after an expression, we will stop parsing the arguments
- break argumentLoop
- } else {
- // If there's NO closing bracket, there MUST be an comma
- if p.Match(TokenSymbol, ",") == nil {
- return nil, p.Error("Missing comma or closing bracket after argument.", nil)
- }
- }
- } else {
- // We got a closing bracket, so stop parsing arguments
- p.Consume()
- break argumentLoop
- }
-
- }
- // We're done parsing the function call, next variable part
- continue variableLoop
- }
-
- // No dot, subscript or function call? Then we're done with the variable parsing
- break
- }
-
- return resolver, nil
-}
-
-func (p *Parser) parseVariableOrLiteralWithFilter() (*nodeFilteredVariable, *Error) {
- v := &nodeFilteredVariable{
- locationToken: p.Current(),
- }
-
- // Parse the variable name
- resolver, err := p.parseVariableOrLiteral()
- if err != nil {
- return nil, err
- }
- v.resolver = resolver
-
- // Parse all the filters
-filterLoop:
- for p.Match(TokenSymbol, "|") != nil {
- // Parse one single filter
- filter, err := p.parseFilter()
- if err != nil {
- return nil, err
- }
-
- // Check sandbox filter restriction
- if _, isBanned := p.template.set.bannedFilters[filter.name]; isBanned {
- return nil, p.Error(fmt.Sprintf("Usage of filter '%s' is not allowed (sandbox restriction active).", filter.name), nil)
- }
-
- v.filterChain = append(v.filterChain, filter)
-
- continue filterLoop
- }
-
- return v, nil
-}
-
-func (p *Parser) parseVariableElement() (INode, *Error) {
- node := &nodeVariable{
- locationToken: p.Current(),
- }
-
- p.Consume() // consume '{{'
-
- expr, err := p.ParseExpression()
- if err != nil {
- return nil, err
- }
- node.expr = expr
-
- if p.Match(TokenSymbol, "}}") == nil {
- return nil, p.Error("'}}' expected", nil)
- }
-
- return node, nil
-}
diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore
new file mode 100644
index 000000000..daf913b1b
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
new file mode 100644
index 000000000..d7b4b8d58
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+# Contributing #
+
+Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated.
+
+## New API or feature ##
+
+I want to speak more about how to add new functions to this package.
+
+Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible.
+
+* Rule 1: Only string algorithm, which takes string as input, can be included.
+* Rule 2: If a function has been implemented in package `string`, it must not be included.
+* Rule 3: If a function is not language neutral, it must not be included.
+* Rule 4: If a function is a part of standard library in other languages, it can be included.
+* Rule 5: If a function is quite useful in some famous framework or library, it can be included.
+
+New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected.
+
+## Pull request ##
+
+Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit.
+
+If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list.
diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE
new file mode 100644
index 000000000..270177259
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Huan Du
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md
new file mode 100644
index 000000000..750c3c7eb
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/README.md
@@ -0,0 +1,117 @@
+# xstrings
+
+[![Build Status](https://github.com/huandu/xstrings/workflows/Go/badge.svg)](https://github.com/huandu/xstrings/actions)
+[![Go Doc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://pkg.go.dev/github.com/huandu/xstrings)
+[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings)
+[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master)
+
+Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings).
+
+All functions are well tested and carefully tuned for performance.
+
+## Propose a new function
+
+Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included.
+
+## Install
+
+Use `go get` to install this library.
+
+ go get github.com/huandu/xstrings
+
+## API document
+
+See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document.
+
+## Function list
+
+Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use.
+
+Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers.
+
+### Package `xstrings` functions
+
+_Keep this table sorted by Function in ascending order._
+
+| Function | Friends | # |
+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------- | --------------------------------------------------- |
+| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) |
+| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) |
+| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) |
+| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) |
+| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) |
+| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) |
+| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) |
+| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) |
+| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) |
+| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) |
+| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) |
+| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) |
+| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) |
+| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) |
+| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) |
+| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) |
+| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) |
+| [Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) |
+| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) |
+| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
+| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) |
+| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) |
+| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) |
+| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) |
+| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) |
+| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) |
+
+### Package `strings` functions
+
+_Keep this table sorted by Function in ascending order._
+
+| Function | Friends |
+| --------------------------------------------------------------- | ----------------------------------------------------------------------------------- |
+| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby |
+| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - |
+| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - |
+| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP |
+| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby |
+| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby |
+| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - |
+| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby |
+| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby |
+| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl |
+| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - |
+| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - |
+| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - |
+| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - |
+| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl |
+| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl |
+| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - |
+| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - |
+| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby |
+| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP |
+| [Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP |
+| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl |
+| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - |
+| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - |
+| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl |
+| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python |
+| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl |
+| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - |
+| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - |
+| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - |
+| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl |
+| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - |
+| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP |
+| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - |
+| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP |
+| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - |
+| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - |
+| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP |
+| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - |
+| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP |
+| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl |
+
+## License
+
+This library is licensed under MIT license. See LICENSE for details.
diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go
new file mode 100644
index 000000000..f427cc84e
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/common.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+const bufferMaxInitGrowSize = 2048
+
+// Lazy initialize a buffer.
+func allocBuffer(orig, cur string) *stringBuilder {
+ output := &stringBuilder{}
+ maxSize := len(orig) * 4
+
+ // Avoid to reserve too much memory at once.
+ if maxSize > bufferMaxInitGrowSize {
+ maxSize = bufferMaxInitGrowSize
+ }
+
+ output.Grow(maxSize)
+ output.WriteString(orig[:len(orig)-len(cur)])
+ return output
+}
diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go
new file mode 100644
index 000000000..151c3151d
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/convert.go
@@ -0,0 +1,590 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+ "math/rand"
+ "unicode"
+ "unicode/utf8"
+)
+
+// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case.
+//
+// Some samples.
+// "some_words" => "SomeWords"
+// "http_server" => "HttpServer"
+// "no_https" => "NoHttps"
+// "_complex__case_" => "_Complex_Case_"
+// "some words" => "SomeWords"
+func ToCamelCase(str string) string {
+ if len(str) == 0 {
+ return ""
+ }
+
+ buf := &stringBuilder{}
+ var r0, r1 rune
+ var size int
+
+ // leading connector will appear in output.
+ for len(str) > 0 {
+ r0, size = utf8.DecodeRuneInString(str)
+ str = str[size:]
+
+ if !isConnector(r0) {
+ r0 = unicode.ToUpper(r0)
+ break
+ }
+
+ buf.WriteRune(r0)
+ }
+
+ if len(str) == 0 {
+ // A special case for a string contains only 1 rune.
+ if size != 0 {
+ buf.WriteRune(r0)
+ }
+
+ return buf.String()
+ }
+
+ for len(str) > 0 {
+ r1 = r0
+ r0, size = utf8.DecodeRuneInString(str)
+ str = str[size:]
+
+ if isConnector(r0) && isConnector(r1) {
+ buf.WriteRune(r1)
+ continue
+ }
+
+ if isConnector(r1) {
+ r0 = unicode.ToUpper(r0)
+ } else {
+ r0 = unicode.ToLower(r0)
+ buf.WriteRune(r1)
+ }
+ }
+
+ buf.WriteRune(r0)
+ return buf.String()
+}
+
+// ToSnakeCase can convert all upper case characters in a string to
+// snake case format.
+//
+// Some samples.
+// "FirstName" => "first_name"
+// "HTTPServer" => "http_server"
+// "NoHTTPS" => "no_https"
+// "GO_PATH" => "go_path"
+// "GO PATH" => "go_path" // space is converted to underscore.
+// "GO-PATH" => "go_path" // hyphen is converted to underscore.
+// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet.
+// "HTTP20xOK" => "http_20x_ok"
+// "Duration2m3s" => "duration_2m3s"
+// "Bld4Floor3rd" => "bld4_floor_3rd"
+func ToSnakeCase(str string) string {
+ return camelCaseToLowerCase(str, '_')
+}
+
+// ToKebabCase can convert all upper case characters in a string to
+// kebab case format.
+//
+// Some samples.
+// "FirstName" => "first-name"
+// "HTTPServer" => "http-server"
+// "NoHTTPS" => "no-https"
+// "GO_PATH" => "go-path"
+// "GO PATH" => "go-path" // space is converted to '-'.
+// "GO-PATH" => "go-path" // hyphen is converted to '-'.
+// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet.
+// "HTTP20xOK" => "http-20x-ok"
+// "Duration2m3s" => "duration-2m3s"
+// "Bld4Floor3rd" => "bld4-floor-3rd"
+func ToKebabCase(str string) string {
+ return camelCaseToLowerCase(str, '-')
+}
+
+func camelCaseToLowerCase(str string, connector rune) string {
+ if len(str) == 0 {
+ return ""
+ }
+
+ buf := &stringBuilder{}
+ wt, word, remaining := nextWord(str)
+
+ for len(remaining) > 0 {
+ if wt != connectorWord {
+ toLower(buf, wt, word, connector)
+ }
+
+ prev := wt
+ last := word
+ wt, word, remaining = nextWord(remaining)
+
+ switch prev {
+ case numberWord:
+ for wt == alphabetWord || wt == numberWord {
+ toLower(buf, wt, word, connector)
+ wt, word, remaining = nextWord(remaining)
+ }
+
+ if wt != invalidWord && wt != punctWord && wt != connectorWord {
+ buf.WriteRune(connector)
+ }
+
+ case connectorWord:
+ toLower(buf, prev, last, connector)
+
+ case punctWord:
+ // nothing.
+
+ default:
+ if wt != numberWord {
+ if wt != connectorWord && wt != punctWord {
+ buf.WriteRune(connector)
+ }
+
+ break
+ }
+
+ if len(remaining) == 0 {
+ break
+ }
+
+ last := word
+ wt, word, remaining = nextWord(remaining)
+
+ // consider number as a part of previous word.
+ // e.g. "Bld4Floor" => "bld4_floor"
+ if wt != alphabetWord {
+ toLower(buf, numberWord, last, connector)
+
+ if wt != connectorWord && wt != punctWord {
+ buf.WriteRune(connector)
+ }
+
+ break
+ }
+
+ // if there are some lower case letters following a number,
+ // add connector before the number.
+ // e.g. "HTTP2xx" => "http_2xx"
+ buf.WriteRune(connector)
+ toLower(buf, numberWord, last, connector)
+
+ for wt == alphabetWord || wt == numberWord {
+ toLower(buf, wt, word, connector)
+ wt, word, remaining = nextWord(remaining)
+ }
+
+ if wt != invalidWord && wt != connectorWord && wt != punctWord {
+ buf.WriteRune(connector)
+ }
+ }
+ }
+
+ toLower(buf, wt, word, connector)
+ return buf.String()
+}
+
+func isConnector(r rune) bool {
+ return r == '-' || r == '_' || unicode.IsSpace(r)
+}
+
+type wordType int
+
+const (
+ invalidWord wordType = iota
+ numberWord
+ upperCaseWord
+ alphabetWord
+ connectorWord
+ punctWord
+ otherWord
+)
+
+func nextWord(str string) (wt wordType, word, remaining string) {
+ if len(str) == 0 {
+ return
+ }
+
+ var offset int
+ remaining = str
+ r, size := nextValidRune(remaining, utf8.RuneError)
+ offset += size
+
+ if r == utf8.RuneError {
+ wt = invalidWord
+ word = str[:offset]
+ remaining = str[offset:]
+ return
+ }
+
+ switch {
+ case isConnector(r):
+ wt = connectorWord
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !isConnector(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+
+ case unicode.IsPunct(r):
+ wt = punctWord
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !unicode.IsPunct(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+
+ case unicode.IsUpper(r):
+ wt = upperCaseWord
+ remaining = remaining[size:]
+
+ if len(remaining) == 0 {
+ break
+ }
+
+ r, size = nextValidRune(remaining, r)
+
+ switch {
+ case unicode.IsUpper(r):
+ prevSize := size
+ offset += size
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !unicode.IsUpper(r) {
+ break
+ }
+
+ prevSize = size
+ offset += size
+ remaining = remaining[size:]
+ }
+
+ // it's a bit complex when dealing with a case like "HTTPStatus".
+ // it's expected to be splitted into "HTTP" and "Status".
+ // Therefore "S" should be in remaining instead of word.
+ if len(remaining) > 0 && isAlphabet(r) {
+ offset -= prevSize
+ remaining = str[offset:]
+ }
+
+ case isAlphabet(r):
+ offset += size
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !isAlphabet(r) || unicode.IsUpper(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+ }
+
+ case isAlphabet(r):
+ wt = alphabetWord
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !isAlphabet(r) || unicode.IsUpper(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+
+ case unicode.IsNumber(r):
+ wt = numberWord
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if !unicode.IsNumber(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+
+ default:
+ wt = otherWord
+ remaining = remaining[size:]
+
+ for len(remaining) > 0 {
+ r, size = nextValidRune(remaining, r)
+
+ if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) {
+ break
+ }
+
+ offset += size
+ remaining = remaining[size:]
+ }
+ }
+
+ word = str[:offset]
+ return
+}
+
+func nextValidRune(str string, prev rune) (r rune, size int) {
+ var sz int
+
+ for len(str) > 0 {
+ r, sz = utf8.DecodeRuneInString(str)
+ size += sz
+
+ if r != utf8.RuneError {
+ return
+ }
+
+ str = str[sz:]
+ }
+
+ r = prev
+ return
+}
+
+func toLower(buf *stringBuilder, wt wordType, str string, connector rune) {
+ buf.Grow(buf.Len() + len(str))
+
+ if wt != upperCaseWord && wt != connectorWord {
+ buf.WriteString(str)
+ return
+ }
+
+ for len(str) > 0 {
+ r, size := utf8.DecodeRuneInString(str)
+ str = str[size:]
+
+ if isConnector(r) {
+ buf.WriteRune(connector)
+ } else if unicode.IsUpper(r) {
+ buf.WriteRune(unicode.ToLower(r))
+ } else {
+ buf.WriteRune(r)
+ }
+ }
+}
+
+// SwapCase will swap characters case from upper to lower or lower to upper.
+func SwapCase(str string) string {
+ var r rune
+ var size int
+
+ buf := &stringBuilder{}
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ switch {
+ case unicode.IsUpper(r):
+ buf.WriteRune(unicode.ToLower(r))
+
+ case unicode.IsLower(r):
+ buf.WriteRune(unicode.ToUpper(r))
+
+ default:
+ buf.WriteRune(r)
+ }
+
+ str = str[size:]
+ }
+
+ return buf.String()
+}
+
+// FirstRuneToUpper converts first rune to upper case if necessary.
+func FirstRuneToUpper(str string) string {
+ if str == "" {
+ return str
+ }
+
+ r, size := utf8.DecodeRuneInString(str)
+
+ if !unicode.IsLower(r) {
+ return str
+ }
+
+ buf := &stringBuilder{}
+ buf.WriteRune(unicode.ToUpper(r))
+ buf.WriteString(str[size:])
+ return buf.String()
+}
+
+// FirstRuneToLower converts first rune to lower case if necessary.
+func FirstRuneToLower(str string) string {
+ if str == "" {
+ return str
+ }
+
+ r, size := utf8.DecodeRuneInString(str)
+
+ if !unicode.IsUpper(r) {
+ return str
+ }
+
+ buf := &stringBuilder{}
+ buf.WriteRune(unicode.ToLower(r))
+ buf.WriteString(str[size:])
+ return buf.String()
+}
+
+// Shuffle randomizes runes in a string and returns the result.
+// It uses default random source in `math/rand`.
+func Shuffle(str string) string {
+ if str == "" {
+ return str
+ }
+
+ runes := []rune(str)
+ index := 0
+
+ for i := len(runes) - 1; i > 0; i-- {
+ index = rand.Intn(i + 1)
+
+ if i != index {
+ runes[i], runes[index] = runes[index], runes[i]
+ }
+ }
+
+ return string(runes)
+}
+
+// ShuffleSource randomizes runes in a string with given random source.
+func ShuffleSource(str string, src rand.Source) string {
+ if str == "" {
+ return str
+ }
+
+ runes := []rune(str)
+ index := 0
+ r := rand.New(src)
+
+ for i := len(runes) - 1; i > 0; i-- {
+ index = r.Intn(i + 1)
+
+ if i != index {
+ runes[i], runes[index] = runes[index], runes[i]
+ }
+ }
+
+ return string(runes)
+}
+
+// Successor returns the successor to string.
+//
+// If there is one alphanumeric rune is found in string, increase the rune by 1.
+// If increment generates a "carry", the rune to the left of it is incremented.
+// This process repeats until there is no carry, adding an additional rune if necessary.
+//
+// If there is no alphanumeric rune, the rightmost rune will be increased by 1
+// regardless whether the result is a valid rune or not.
+//
+// Only following characters are alphanumeric.
+// * a - z
+// * A - Z
+// * 0 - 9
+//
+// Samples (borrowed from ruby's String#succ document):
+// "abcd" => "abce"
+// "THX1138" => "THX1139"
+// "<>" => "<>"
+// "1999zzz" => "2000aaa"
+// "ZZZ9999" => "AAAA0000"
+// "***" => "**+"
+func Successor(str string) string {
+ if str == "" {
+ return str
+ }
+
+ var r rune
+ var i int
+ carry := ' '
+ runes := []rune(str)
+ l := len(runes)
+ lastAlphanumeric := l
+
+ for i = l - 1; i >= 0; i-- {
+ r = runes[i]
+
+ if ('a' <= r && r <= 'y') ||
+ ('A' <= r && r <= 'Y') ||
+ ('0' <= r && r <= '8') {
+ runes[i]++
+ carry = ' '
+ lastAlphanumeric = i
+ break
+ }
+
+ switch r {
+ case 'z':
+ runes[i] = 'a'
+ carry = 'a'
+ lastAlphanumeric = i
+
+ case 'Z':
+ runes[i] = 'A'
+ carry = 'A'
+ lastAlphanumeric = i
+
+ case '9':
+ runes[i] = '0'
+ carry = '0'
+ lastAlphanumeric = i
+ }
+ }
+
+ // Needs to add one character for carry.
+ if i < 0 && carry != ' ' {
+ buf := &stringBuilder{}
+ buf.Grow(l + 4) // Reserve enough space for write.
+
+ if lastAlphanumeric != 0 {
+ buf.WriteString(str[:lastAlphanumeric])
+ }
+
+ buf.WriteRune(carry)
+
+ for _, r = range runes[lastAlphanumeric:] {
+ buf.WriteRune(r)
+ }
+
+ return buf.String()
+ }
+
+ // No alphanumeric character. Simply increase last rune's value.
+ if lastAlphanumeric == l {
+ runes[l-1]++
+ }
+
+ return string(runes)
+}
diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go
new file mode 100644
index 000000000..f96e38703
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/count.go
@@ -0,0 +1,120 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+// Len returns str's utf8 rune length.
+func Len(str string) int {
+ return utf8.RuneCountInString(str)
+}
+
+// WordCount returns number of words in a string.
+//
+// Word is defined as a locale dependent string containing alphabetic characters,
+// which may also contain but not start with `'` and `-` characters.
+func WordCount(str string) int {
+ var r rune
+ var size, n int
+
+ inWord := false
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ switch {
+ case isAlphabet(r):
+ if !inWord {
+ inWord = true
+ n++
+ }
+
+ case inWord && (r == '\'' || r == '-'):
+ // Still in word.
+
+ default:
+ inWord = false
+ }
+
+ str = str[size:]
+ }
+
+ return n
+}
+
+const minCJKCharacter = '\u3400'
+
+// Checks r is a letter but not CJK character.
+func isAlphabet(r rune) bool {
+ if !unicode.IsLetter(r) {
+ return false
+ }
+
+ switch {
+ // Quick check for non-CJK character.
+ case r < minCJKCharacter:
+ return true
+
+ // Common CJK characters.
+ case r >= '\u4E00' && r <= '\u9FCC':
+ return false
+
+ // Rare CJK characters.
+ case r >= '\u3400' && r <= '\u4D85':
+ return false
+
+ // Rare and historic CJK characters.
+ case r >= '\U00020000' && r <= '\U0002B81D':
+ return false
+ }
+
+ return true
+}
+
+// Width returns string width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func Width(str string) int {
+ var r rune
+ var size, n int
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+ n += RuneWidth(r)
+ str = str[size:]
+ }
+
+ return n
+}
+
+// RuneWidth returns character width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func RuneWidth(r rune) int {
+ switch {
+ case r == utf8.RuneError || r < '\x20':
+ return 0
+
+ case '\x20' <= r && r < '\u2000':
+ return 1
+
+ case '\u2000' <= r && r < '\uFF61':
+ return 2
+
+ case '\uFF61' <= r && r < '\uFFA0':
+ return 1
+
+ case '\uFFA0' <= r:
+ return 2
+ }
+
+ return 0
+}
diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go
new file mode 100644
index 000000000..1a6ef069f
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// Package xstrings is to provide string algorithms which are useful but not included in `strings` package.
+// See project home page for details. https://github.com/huandu/xstrings
+//
+// Package xstrings assumes all strings are encoded in utf8.
+package xstrings
diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go
new file mode 100644
index 000000000..8cd76c525
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/format.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+ "unicode/utf8"
+)
+
+// ExpandTabs can expand tabs ('\t') rune in str to one or more spaces dpending on
+// current column and tabSize.
+// The column number is reset to zero after each newline ('\n') occurring in the str.
+//
+// ExpandTabs uses RuneWidth to decide rune's width.
+// For example, CJK characters will be treated as two characters.
+//
+// If tabSize <= 0, ExpandTabs panics with error.
+//
+// Samples:
+// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k"
+// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l"
+// ExpandTabs("z中\t文\tw", 4) => "z中 文 w"
+func ExpandTabs(str string, tabSize int) string {
+ if tabSize <= 0 {
+ panic("tab size must be positive")
+ }
+
+ var r rune
+ var i, size, column, expand int
+ var output *stringBuilder
+
+ orig := str
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ if r == '\t' {
+ expand = tabSize - column%tabSize
+
+ if output == nil {
+ output = allocBuffer(orig, str)
+ }
+
+ for i = 0; i < expand; i++ {
+ output.WriteRune(' ')
+ }
+
+ column += expand
+ } else {
+ if r == '\n' {
+ column = 0
+ } else {
+ column += RuneWidth(r)
+ }
+
+ if output != nil {
+ output.WriteRune(r)
+ }
+ }
+
+ str = str[size:]
+ }
+
+ if output == nil {
+ return orig
+ }
+
+ return output.String()
+}
+
+// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length.
+// If str's rune length is larger than length, str itself will be returned.
+//
+// If pad is an empty string, str will be returned.
+//
+// Samples:
+// LeftJustify("hello", 4, " ") => "hello"
+// LeftJustify("hello", 10, " ") => "hello "
+// LeftJustify("hello", 10, "123") => "hello12312"
+func LeftJustify(str string, length int, pad string) string {
+ l := Len(str)
+
+ if l >= length || pad == "" {
+ return str
+ }
+
+ remains := length - l
+ padLen := Len(pad)
+
+ output := &stringBuilder{}
+ output.Grow(len(str) + (remains/padLen+1)*len(pad))
+ output.WriteString(str)
+ writePadString(output, pad, padLen, remains)
+ return output.String()
+}
+
+// RightJustify returns a string with pad string at left side if str's rune length is smaller than length.
+// If str's rune length is larger than length, str itself will be returned.
+//
+// If pad is an empty string, str will be returned.
+//
+// Samples:
+// RightJustify("hello", 4, " ") => "hello"
+// RightJustify("hello", 10, " ") => " hello"
+// RightJustify("hello", 10, "123") => "12312hello"
+func RightJustify(str string, length int, pad string) string {
+ l := Len(str)
+
+ if l >= length || pad == "" {
+ return str
+ }
+
+ remains := length - l
+ padLen := Len(pad)
+
+ output := &stringBuilder{}
+ output.Grow(len(str) + (remains/padLen+1)*len(pad))
+ writePadString(output, pad, padLen, remains)
+ output.WriteString(str)
+ return output.String()
+}
+
+// Center returns a string with pad string at both side if str's rune length is smaller than length.
+// If str's rune length is larger than length, str itself will be returned.
+//
+// If pad is an empty string, str will be returned.
+//
+// Samples:
+// Center("hello", 4, " ") => "hello"
+// Center("hello", 10, " ") => " hello "
+// Center("hello", 10, "123") => "12hello123"
+func Center(str string, length int, pad string) string {
+ l := Len(str)
+
+ if l >= length || pad == "" {
+ return str
+ }
+
+ remains := length - l
+ padLen := Len(pad)
+
+ output := &stringBuilder{}
+ output.Grow(len(str) + (remains/padLen+1)*len(pad))
+ writePadString(output, pad, padLen, remains/2)
+ output.WriteString(str)
+ writePadString(output, pad, padLen, (remains+1)/2)
+ return output.String()
+}
+
+func writePadString(output *stringBuilder, pad string, padLen, remains int) {
+ var r rune
+ var size int
+
+ repeats := remains / padLen
+
+ for i := 0; i < repeats; i++ {
+ output.WriteString(pad)
+ }
+
+ remains = remains % padLen
+
+ if remains != 0 {
+ for i := 0; i < remains; i++ {
+ r, size = utf8.DecodeRuneInString(pad)
+ output.WriteRune(r)
+ pad = pad[size:]
+ }
+ }
+}
diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go
new file mode 100644
index 000000000..64075f9bb
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/manipulate.go
@@ -0,0 +1,216 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+// Reverse a utf8 encoded string.
+func Reverse(str string) string {
+ var size int
+
+ tail := len(str)
+ buf := make([]byte, tail)
+ s := buf
+
+ for len(str) > 0 {
+ _, size = utf8.DecodeRuneInString(str)
+ tail -= size
+ s = append(s[:tail], []byte(str[:size])...)
+ str = str[size:]
+ }
+
+ return string(buf)
+}
+
+// Slice a string by rune.
+//
+// Start must satisfy 0 <= start <= rune length.
+//
+// End can be positive, zero or negative.
+// If end >= 0, start and end must satisfy start <= end <= rune length.
+// If end < 0, it means slice to the end of string.
+//
+// Otherwise, Slice will panic as out of range.
+func Slice(str string, start, end int) string {
+ var size, startPos, endPos int
+
+ origin := str
+
+ if start < 0 || end > len(str) || (end >= 0 && start > end) {
+ panic("out of range")
+ }
+
+ if end >= 0 {
+ end -= start
+ }
+
+ for start > 0 && len(str) > 0 {
+ _, size = utf8.DecodeRuneInString(str)
+ start--
+ startPos += size
+ str = str[size:]
+ }
+
+ if end < 0 {
+ return origin[startPos:]
+ }
+
+ endPos = startPos
+
+ for end > 0 && len(str) > 0 {
+ _, size = utf8.DecodeRuneInString(str)
+ end--
+ endPos += size
+ str = str[size:]
+ }
+
+ if len(str) == 0 && (start > 0 || end > 0) {
+ panic("out of range")
+ }
+
+ return origin[startPos:endPos]
+}
+
+// Partition splits a string by sep into three parts.
+// The return value is a slice of strings with head, match and tail.
+//
+// If str contains sep, for example "hello" and "l", Partition returns
+// "he", "l", "lo"
+//
+// If str doesn't contain sep, for example "hello" and "x", Partition returns
+// "hello", "", ""
+func Partition(str, sep string) (head, match, tail string) {
+ index := strings.Index(str, sep)
+
+ if index == -1 {
+ head = str
+ return
+ }
+
+ head = str[:index]
+ match = str[index : index+len(sep)]
+ tail = str[index+len(sep):]
+ return
+}
+
+// LastPartition splits a string by last instance of sep into three parts.
+// The return value is a slice of strings with head, match and tail.
+//
+// If str contains sep, for example "hello" and "l", LastPartition returns
+// "hel", "l", "o"
+//
+// If str doesn't contain sep, for example "hello" and "x", LastPartition returns
+// "", "", "hello"
+func LastPartition(str, sep string) (head, match, tail string) {
+ index := strings.LastIndex(str, sep)
+
+ if index == -1 {
+ tail = str
+ return
+ }
+
+ head = str[:index]
+ match = str[index : index+len(sep)]
+ tail = str[index+len(sep):]
+ return
+}
+
+// Insert src into dst at given rune index.
+// Index is counted by runes instead of bytes.
+//
+// If index is out of range of dst, panic with out of range.
+func Insert(dst, src string, index int) string {
+ return Slice(dst, 0, index) + src + Slice(dst, index, -1)
+}
+
+// Scrub scrubs invalid utf8 bytes with repl string.
+// Adjacent invalid bytes are replaced only once.
+func Scrub(str, repl string) string {
+ var buf *stringBuilder
+ var r rune
+ var size, pos int
+ var hasError bool
+
+ origin := str
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ if r == utf8.RuneError {
+ if !hasError {
+ if buf == nil {
+ buf = &stringBuilder{}
+ }
+
+ buf.WriteString(origin[:pos])
+ hasError = true
+ }
+ } else if hasError {
+ hasError = false
+ buf.WriteString(repl)
+
+ origin = origin[pos:]
+ pos = 0
+ }
+
+ pos += size
+ str = str[size:]
+ }
+
+ if buf != nil {
+ buf.WriteString(origin)
+ return buf.String()
+ }
+
+ // No invalid byte.
+ return origin
+}
+
+// WordSplit splits a string into words. Returns a slice of words.
+// If there is no word in a string, return nil.
+//
+// Word is defined as a locale dependent string containing alphabetic characters,
+// which may also contain but not start with `'` and `-` characters.
+func WordSplit(str string) []string {
+ var word string
+ var words []string
+ var r rune
+ var size, pos int
+
+ inWord := false
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ switch {
+ case isAlphabet(r):
+ if !inWord {
+ inWord = true
+ word = str
+ pos = 0
+ }
+
+ case inWord && (r == '\'' || r == '-'):
+ // Still in word.
+
+ default:
+ if inWord {
+ inWord = false
+ words = append(words, word[:pos])
+ }
+ }
+
+ pos += size
+ str = str[size:]
+ }
+
+ if inWord {
+ words = append(words, word[:pos])
+ }
+
+ return words
+}
diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go
new file mode 100644
index 000000000..bb0919d32
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/stringbuilder.go
@@ -0,0 +1,7 @@
+//+build go1.10
+
+package xstrings
+
+import "strings"
+
+type stringBuilder = strings.Builder
diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go
new file mode 100644
index 000000000..dac389d13
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go
@@ -0,0 +1,9 @@
+//+build !go1.10
+
+package xstrings
+
+import "bytes"
+
+type stringBuilder struct {
+ bytes.Buffer
+}
diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go
new file mode 100644
index 000000000..42e694fb1
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/translate.go
@@ -0,0 +1,546 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type runeRangeMap struct {
+ FromLo rune // Lower bound of range map.
+ FromHi rune // An inclusive higher bound of range map.
+ ToLo rune
+ ToHi rune
+}
+
+type runeDict struct {
+ Dict [unicode.MaxASCII + 1]rune
+}
+
+type runeMap map[rune]rune
+
+// Translator can translate string with pre-compiled from and to patterns.
+// If a from/to pattern pair needs to be used more than once, it's recommended
+// to create a Translator and reuse it.
+type Translator struct {
+ quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes.
+ runeMap runeMap // Rune map for translation.
+ ranges []*runeRangeMap // Ranges of runes.
+ mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune.
+ reverted bool // If to pattern is empty, all matched characters will be deleted.
+ hasPattern bool
+}
+
+// NewTranslator creates new Translator through a from/to pattern pair.
+func NewTranslator(from, to string) *Translator {
+ tr := &Translator{}
+
+ if from == "" {
+ return tr
+ }
+
+ reverted := from[0] == '^'
+ deletion := len(to) == 0
+
+ if reverted {
+ from = from[1:]
+ }
+
+ var fromStart, fromEnd, fromRangeStep rune
+ var toStart, toEnd, toRangeStep rune
+ var fromRangeSize, toRangeSize rune
+ var singleRunes []rune
+
+ // Update the to rune range.
+ updateRange := func() {
+ // No more rune to read in the to rune pattern.
+ if toEnd == utf8.RuneError {
+ return
+ }
+
+ if toRangeStep == 0 {
+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd)
+ return
+ }
+
+ // Current range is not empty. Consume 1 rune from start.
+ if toStart != toEnd {
+ toStart += toRangeStep
+ return
+ }
+
+ // No more rune. Repeat the last rune.
+ if to == "" {
+ toEnd = utf8.RuneError
+ return
+ }
+
+ // Both start and end are used. Read two more runes from the to pattern.
+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError)
+ }
+
+ if deletion {
+ toStart = utf8.RuneError
+ toEnd = utf8.RuneError
+ } else {
+ // If from pattern is reverted, only the last rune in the to pattern will be used.
+ if reverted {
+ var size int
+
+ for len(to) > 0 {
+ toStart, size = utf8.DecodeRuneInString(to)
+ to = to[size:]
+ }
+
+ toEnd = utf8.RuneError
+ } else {
+ to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError)
+ }
+ }
+
+ fromEnd = utf8.RuneError
+
+ for len(from) > 0 {
+ from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd)
+
+ // fromStart is a single character. Just map it with a rune in the to pattern.
+ if fromRangeStep == 0 {
+ singleRunes = tr.addRune(fromStart, toStart, singleRunes)
+ updateRange()
+ continue
+ }
+
+ for toEnd != utf8.RuneError && fromStart != fromEnd {
+ // If mapped rune is a single character instead of a range, simply shift first
+ // rune in the range.
+ if toRangeStep == 0 {
+ singleRunes = tr.addRune(fromStart, toStart, singleRunes)
+ updateRange()
+ fromStart += fromRangeStep
+ continue
+ }
+
+ fromRangeSize = (fromEnd - fromStart) * fromRangeStep
+ toRangeSize = (toEnd - toStart) * toRangeStep
+
+ // Not enough runes in the to pattern. Need to read more.
+ if fromRangeSize > toRangeSize {
+ fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes)
+ fromStart += fromRangeStep
+ updateRange()
+
+ // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered
+ // as a single rune.
+ if fromStart == fromEnd {
+ singleRunes = tr.addRune(fromStart, toStart, singleRunes)
+ updateRange()
+ }
+
+ continue
+ }
+
+ fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes)
+ updateRange()
+ break
+ }
+
+ if fromStart == fromEnd {
+ fromEnd = utf8.RuneError
+ continue
+ }
+
+ _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes)
+ fromEnd = utf8.RuneError
+ }
+
+ if fromEnd != utf8.RuneError {
+ tr.addRune(fromEnd, toStart, singleRunes)
+ }
+
+ tr.reverted = reverted
+ tr.mappedRune = -1
+ tr.hasPattern = true
+
+ // Translate RuneError only if in deletion or reverted mode.
+ if deletion || reverted {
+ tr.mappedRune = toStart
+ }
+
+ return tr
+}
+
+func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune {
+ if from <= unicode.MaxASCII {
+ if tr.quickDict == nil {
+ tr.quickDict = &runeDict{}
+ }
+
+ tr.quickDict.Dict[from] = to
+ } else {
+ if tr.runeMap == nil {
+ tr.runeMap = make(runeMap)
+ }
+
+ tr.runeMap[from] = to
+ }
+
+ singleRunes = append(singleRunes, from)
+ return singleRunes
+}
+
+func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) {
+ var r rune
+ var rrm *runeRangeMap
+
+ if fromLo < fromHi {
+ rrm = &runeRangeMap{
+ FromLo: fromLo,
+ FromHi: fromHi,
+ ToLo: toLo,
+ ToHi: toHi,
+ }
+ } else {
+ rrm = &runeRangeMap{
+ FromLo: fromHi,
+ FromHi: fromLo,
+ ToLo: toHi,
+ ToHi: toLo,
+ }
+ }
+
+ // If there is any single rune conflicts with this rune range, clear single rune record.
+ for _, r = range singleRunes {
+ if rrm.FromLo <= r && r <= rrm.FromHi {
+ if r <= unicode.MaxASCII {
+ tr.quickDict.Dict[r] = 0
+ } else {
+ delete(tr.runeMap, r)
+ }
+ }
+ }
+
+ tr.ranges = append(tr.ranges, rrm)
+ return fromHi, toHi
+}
+
+func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) {
+ var r rune
+ var size int
+
+ remaining = str
+ escaping := false
+ isRange := false
+
+ for len(remaining) > 0 {
+ r, size = utf8.DecodeRuneInString(remaining)
+ remaining = remaining[size:]
+
+ // Parse special characters.
+ if !escaping {
+ if r == '\\' {
+ escaping = true
+ continue
+ }
+
+ if r == '-' {
+ // Ignore slash at beginning of string.
+ if last == utf8.RuneError {
+ continue
+ }
+
+ start = last
+ isRange = true
+ continue
+ }
+ }
+
+ escaping = false
+
+ if last != utf8.RuneError {
+ // This is a range which start and end are the same.
+ // Considier it as a normal character.
+ if isRange && last == r {
+ isRange = false
+ continue
+ }
+
+ start = last
+ end = r
+
+ if isRange {
+ if start < end {
+ rangeStep = 1
+ } else {
+ rangeStep = -1
+ }
+ }
+
+ return
+ }
+
+ last = r
+ }
+
+ start = last
+ end = utf8.RuneError
+ return
+}
+
+// Translate str with a from/to pattern pair.
+//
+// See comment in Translate function for usage and samples.
+func (tr *Translator) Translate(str string) string {
+ if !tr.hasPattern || str == "" {
+ return str
+ }
+
+ var r rune
+ var size int
+ var needTr bool
+
+ orig := str
+
+ var output *stringBuilder
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+ r, needTr = tr.TranslateRune(r)
+
+ if needTr && output == nil {
+ output = allocBuffer(orig, str)
+ }
+
+ if r != utf8.RuneError && output != nil {
+ output.WriteRune(r)
+ }
+
+ str = str[size:]
+ }
+
+ // No character is translated.
+ if output == nil {
+ return orig
+ }
+
+ return output.String()
+}
+
+// TranslateRune return translated rune and true if r matches the from pattern.
+// If r doesn't match the pattern, original r is returned and translated is false.
+func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) {
+ switch {
+ case tr.quickDict != nil:
+ if r <= unicode.MaxASCII {
+ result = tr.quickDict.Dict[r]
+
+ if result != 0 {
+ translated = true
+
+ if tr.mappedRune >= 0 {
+ result = tr.mappedRune
+ }
+
+ break
+ }
+ }
+
+ fallthrough
+
+ case tr.runeMap != nil:
+ var ok bool
+
+ if result, ok = tr.runeMap[r]; ok {
+ translated = true
+
+ if tr.mappedRune >= 0 {
+ result = tr.mappedRune
+ }
+
+ break
+ }
+
+ fallthrough
+
+ default:
+ var rrm *runeRangeMap
+ ranges := tr.ranges
+
+ for i := len(ranges) - 1; i >= 0; i-- {
+ rrm = ranges[i]
+
+ if rrm.FromLo <= r && r <= rrm.FromHi {
+ translated = true
+
+ if tr.mappedRune >= 0 {
+ result = tr.mappedRune
+ break
+ }
+
+ if rrm.ToLo < rrm.ToHi {
+ result = rrm.ToLo + r - rrm.FromLo
+ } else if rrm.ToLo > rrm.ToHi {
+ // ToHi can be smaller than ToLo if range is from higher to lower.
+ result = rrm.ToLo - r + rrm.FromLo
+ } else {
+ result = rrm.ToLo
+ }
+
+ break
+ }
+ }
+ }
+
+ if tr.reverted {
+ if !translated {
+ result = tr.mappedRune
+ }
+
+ translated = !translated
+ }
+
+ if !translated {
+ result = r
+ }
+
+ return
+}
+
+// HasPattern returns true if Translator has one pattern at least.
+func (tr *Translator) HasPattern() bool {
+ return tr.hasPattern
+}
+
+// Translate str with the characters defined in from replaced by characters defined in to.
+//
+// From and to are patterns representing a set of characters. Pattern is defined as following.
+//
+// * Special characters
+// * '-' means a range of runes, e.g.
+// * "a-z" means all characters from 'a' to 'z' inclusive;
+// * "z-a" means all characters from 'z' to 'a' inclusive.
+// * '^' as first character means a set of all runes excepted listed, e.g.
+// * "^a-z" means all characters except 'a' to 'z' inclusive.
+// * '\' escapes special characters.
+// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'.
+//
+// Translate will try to find a 1:1 mapping from from to to.
+// If to is smaller than from, last rune in to will be used to map "out of range" characters in from.
+//
+// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern.
+//
+// If the to pattern is an empty string, Translate works exactly the same as Delete.
+//
+// Samples:
+// Translate("hello", "aeiou", "12345") => "h2ll4"
+// Translate("hello", "a-z", "A-Z") => "HELLO"
+// Translate("hello", "z-a", "a-z") => "svool"
+// Translate("hello", "aeiou", "*") => "h*ll*"
+// Translate("hello", "^l", "*") => "**ll*"
+// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d"
+func Translate(str, from, to string) string {
+ tr := NewTranslator(from, to)
+ return tr.Translate(str)
+}
+
+// Delete runes in str matching the pattern.
+// Pattern is defined in Translate function.
+//
+// Samples:
+// Delete("hello", "aeiou") => "hll"
+// Delete("hello", "a-k") => "llo"
+// Delete("hello", "^a-k") => "he"
+func Delete(str, pattern string) string {
+ tr := NewTranslator(pattern, "")
+ return tr.Translate(str)
+}
+
+// Count how many runes in str match the pattern.
+// Pattern is defined in Translate function.
+//
+// Samples:
+// Count("hello", "aeiou") => 3
+// Count("hello", "a-k") => 3
+// Count("hello", "^a-k") => 2
+func Count(str, pattern string) int {
+ if pattern == "" || str == "" {
+ return 0
+ }
+
+ var r rune
+ var size int
+ var matched bool
+
+ tr := NewTranslator(pattern, "")
+ cnt := 0
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+ str = str[size:]
+
+ if _, matched = tr.TranslateRune(r); matched {
+ cnt++
+ }
+ }
+
+ return cnt
+}
+
+// Squeeze deletes adjacent repeated runes in str.
+// If pattern is not empty, only runes matching the pattern will be squeezed.
+//
+// Samples:
+// Squeeze("hello", "") => "helo"
+// Squeeze("hello", "m-z") => "hello"
+// Squeeze("hello world", " ") => "hello world"
+func Squeeze(str, pattern string) string {
+ var last, r rune
+ var size int
+ var skipSqueeze, matched bool
+ var tr *Translator
+ var output *stringBuilder
+
+ orig := str
+ last = -1
+
+ if len(pattern) > 0 {
+ tr = NewTranslator(pattern, "")
+ }
+
+ for len(str) > 0 {
+ r, size = utf8.DecodeRuneInString(str)
+
+ // Need to squeeze the str.
+ if last == r && !skipSqueeze {
+ if tr != nil {
+ if _, matched = tr.TranslateRune(r); !matched {
+ skipSqueeze = true
+ }
+ }
+
+ if output == nil {
+ output = allocBuffer(orig, str)
+ }
+
+ if skipSqueeze {
+ output.WriteRune(r)
+ }
+ } else {
+ if output != nil {
+ output.WriteRune(r)
+ }
+
+ last = r
+ skipSqueeze = false
+ }
+
+ str = str[size:]
+ }
+
+ if output == nil {
+ return orig
+ }
+
+ return output.String()
+}
diff --git a/vendor/github.com/mitchellh/copystructure/.travis.yml b/vendor/github.com/mitchellh/copystructure/.travis.yml
new file mode 100644
index 000000000..d7b9589ab
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+ - 1.7
+ - tip
+
+script:
+ - go test
+
+matrix:
+ allow_failures:
+ - go: tip
diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE
new file mode 100644
index 000000000..229851590
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
new file mode 100644
index 000000000..f0fbd2e5c
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/README.md
@@ -0,0 +1,21 @@
+# copystructure
+
+copystructure is a Go library for deep copying values in Go.
+
+This allows you to copy Go values that may contain reference values
+such as maps, slices, or pointers, and copy their data as well instead
+of just their references.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 000000000..db6a6aa1a
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+ "reflect"
+ "time"
+)
+
+func init() {
+ Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+ // Just... copy it.
+ return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 000000000..140435255
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,548 @@
+package copystructure
+
+import (
+ "errors"
+ "reflect"
+ "sync"
+
+ "github.com/mitchellh/reflectwalk"
+)
+
+// Copy returns a deep copy of v.
+func Copy(v interface{}) (interface{}, error) {
+ return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
+func Must(v interface{}, err error) interface{} {
+ if err != nil {
+ panic("copy error: " + err.Error())
+ }
+
+ return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+ // Lock any types that are a sync.Locker and are not a mutex while copying.
+ // If there is an RLocker method, use that to get the sync.Locker.
+ Lock bool
+
+ // Copiers is a map of types associated with a CopierFunc. Use the global
+ // Copiers map if this is nil.
+ Copiers map[reflect.Type]CopierFunc
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+ if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+ return nil, errPointerRequired
+ }
+
+ w := new(walker)
+ if c.Lock {
+ w.useLocks = true
+ }
+
+ if c.Copiers == nil {
+ c.Copiers = Copiers
+ }
+
+ err := reflectwalk.Walk(v, w)
+ if err != nil {
+ return nil, err
+ }
+
+ // Get the result. If the result is nil, then we want to turn it
+ // into a typed nil if we can.
+ result := w.Result
+ if result == nil {
+ val := reflect.ValueOf(v)
+ result = reflect.Indirect(reflect.New(val.Type())).Interface()
+ }
+
+ return result, nil
+}
+
+// Return the key used to index interfaces types we've seen. Store the number
+// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
+// easy to calculate, easy to match a key with our current depth, and we don't
+// need to deal with initializing and cleaning up nested maps or slices.
+func ifaceKey(pointers, depth int) uint64 {
+ return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+ Result interface{}
+
+ depth int
+ ignoreDepth int
+ vals []reflect.Value
+ cs []reflect.Value
+
+ // This stores the number of pointers we've walked over, indexed by depth.
+ ps []int
+
+ // If an interface is indirected by a pointer, we need to know the type of
+ // interface to create when creating the new value. Store the interface
+ // types here, indexed by both the walk depth and the number of pointers
+ // already seen at that depth. Use ifaceKey to calculate the proper uint64
+ // value.
+ ifaceTypes map[uint64]reflect.Type
+
+ // any locks we've taken, indexed by depth
+ locks []sync.Locker
+ // take locks while walking the structure
+ useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+ w.depth++
+
+ // ensure we have enough elements to index via w.depth
+ for w.depth >= len(w.locks) {
+ w.locks = append(w.locks, nil)
+ }
+
+ for len(w.ps) < w.depth+1 {
+ w.ps = append(w.ps, 0)
+ }
+
+ return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+ locker := w.locks[w.depth]
+ w.locks[w.depth] = nil
+ if locker != nil {
+ defer locker.Unlock()
+ }
+
+ // clear out pointers and interfaces as we exit the stack
+ w.ps[w.depth] = 0
+
+ for k := range w.ifaceTypes {
+ mask := uint64(^uint32(0))
+ if k&mask == uint64(w.depth) {
+ delete(w.ifaceTypes, k)
+ }
+ }
+
+ w.depth--
+ if w.ignoreDepth > w.depth {
+ w.ignoreDepth = 0
+ }
+
+ if w.ignoring() {
+ return nil
+ }
+
+ switch l {
+ case reflectwalk.Array:
+ fallthrough
+ case reflectwalk.Map:
+ fallthrough
+ case reflectwalk.Slice:
+ w.replacePointerMaybe()
+
+ // Pop map off our container
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.MapValue:
+ // Pop off the key and value
+ mv := w.valPop()
+ mk := w.valPop()
+ m := w.cs[len(w.cs)-1]
+
+ // If mv is the zero value, SetMapIndex deletes the key form the map,
+ // or in this case never adds it. We need to create a properly typed
+ // zero value so that this key can be set.
+ if !mv.IsValid() {
+ mv = reflect.Zero(m.Elem().Type().Elem())
+ }
+ m.Elem().SetMapIndex(mk, mv)
+ case reflectwalk.ArrayElem:
+ // Pop off the value and the index and set it on the array
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ a := w.cs[len(w.cs)-1]
+ ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+ if ae.CanSet() {
+ ae.Set(v)
+ }
+ }
+ case reflectwalk.SliceElem:
+ // Pop off the value and the index and set it on the slice
+ v := w.valPop()
+ i := w.valPop().Interface().(int)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ se := s.Elem().Index(i)
+ if se.CanSet() {
+ se.Set(v)
+ }
+ }
+ case reflectwalk.Struct:
+ w.replacePointerMaybe()
+
+ // Remove the struct from the container stack
+ w.cs = w.cs[:len(w.cs)-1]
+ case reflectwalk.StructField:
+ // Pop off the value and the field
+ v := w.valPop()
+ f := w.valPop().Interface().(reflect.StructField)
+ if v.IsValid() {
+ s := w.cs[len(w.cs)-1]
+ sf := reflect.Indirect(s).FieldByName(f.Name)
+
+ if sf.CanSet() {
+ sf.Set(v)
+ }
+ }
+ case reflectwalk.WalkLoc:
+ // Clear out the slices for GC
+ w.cs = nil
+ w.vals = nil
+ }
+
+ return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(m)
+
+ // Create the map. If the map itself is nil, then just make a nil map
+ var newMap reflect.Value
+ if m.IsNil() {
+ newMap = reflect.New(m.Type())
+ } else {
+ newMap = wrapPtr(reflect.MakeMap(m.Type()))
+ }
+
+ w.cs = append(w.cs, newMap)
+ w.valPush(newMap)
+ return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+ return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+ if v {
+ w.ps[w.depth]++
+ }
+ return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+ if v {
+ w.ps[w.depth]--
+ }
+ return nil
+}
+
+func (w *walker) Interface(v reflect.Value) error {
+ if !v.IsValid() {
+ return nil
+ }
+ if w.ifaceTypes == nil {
+ w.ifaceTypes = make(map[uint64]reflect.Type)
+ }
+
+ w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+ return nil
+}
+
+func (w *walker) Primitive(v reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(v)
+
+ // IsValid verifies the v is non-zero and CanInterface verifies
+ // that we're allowed to read this value (unexported fields).
+ var newV reflect.Value
+ if v.IsValid() && v.CanInterface() {
+ newV = reflect.New(v.Type())
+ newV.Elem().Set(v)
+ }
+
+ w.valPush(newV)
+ w.replacePointerMaybe()
+ return nil
+}
+
+func (w *walker) Slice(s reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(s)
+
+ var newS reflect.Value
+ if s.IsNil() {
+ newS = reflect.New(s.Type())
+ } else {
+ newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
+ }
+
+ w.cs = append(w.cs, newS)
+ w.valPush(newS)
+ return nil
+}
+
+func (w *walker) SliceElem(i int, elem reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // We don't write the slice here because elem might still be
+ // arbitrarily complex. Just record the index and continue on.
+ w.valPush(reflect.ValueOf(i))
+
+ return nil
+}
+
+func (w *walker) Array(a reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(a)
+
+ newA := reflect.New(a.Type())
+
+ w.cs = append(w.cs, newA)
+ w.valPush(newA)
+ return nil
+}
+
+func (w *walker) ArrayElem(i int, elem reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // We don't write the array here because elem might still be
+ // arbitrarily complex. Just record the index and continue on.
+ w.valPush(reflect.ValueOf(i))
+
+ return nil
+}
+
+func (w *walker) Struct(s reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+ w.lock(s)
+
+ var v reflect.Value
+ if c, ok := Copiers[s.Type()]; ok {
+ // We have a Copier for this struct, so we use that copier to
+ // get the copy, and we ignore anything deeper than this.
+ w.ignoreDepth = w.depth
+
+ dup, err := c(s.Interface())
+ if err != nil {
+ return err
+ }
+
+ // We need to put a pointer to the value on the value stack,
+ // so allocate a new pointer and set it.
+ v = reflect.New(s.Type())
+ reflect.Indirect(v).Set(reflect.ValueOf(dup))
+ } else {
+ // No copier, we copy ourselves and allow reflectwalk to guide
+ // us deeper into the structure for copying.
+ v = reflect.New(s.Type())
+ }
+
+ // Push the value onto the value stack for setting the struct field,
+ // and add the struct itself to the containers stack in case we walk
+ // deeper so that its own fields can be modified.
+ w.valPush(v)
+ w.cs = append(w.cs, v)
+
+ return nil
+}
+
+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+ if w.ignoring() {
+ return nil
+ }
+
+ // If PkgPath is non-empty, this is a private (unexported) field.
+ // We do not set this unexported since the Go runtime doesn't allow us.
+ if f.PkgPath != "" {
+ return reflectwalk.SkipEntry
+ }
+
+ // Push the field onto the stack, we'll handle it when we exit
+ // the struct field in Exit...
+ w.valPush(reflect.ValueOf(f))
+ return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this on
+func (w *walker) ignore() {
+ w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+ return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+ return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+ result := w.vals[len(w.vals)-1]
+ w.vals = w.vals[:len(w.vals)-1]
+
+ // If we're out of values, that means we popped everything off. In
+ // this case, we reset the result so the next pushed value becomes
+ // the result.
+ if len(w.vals) == 0 {
+ w.Result = nil
+ }
+
+ return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+ w.vals = append(w.vals, v)
+
+ // If we haven't set the result yet, then this is the result since
+ // it is the first (outermost) value we're seeing.
+ if w.Result == nil && v.IsValid() {
+ w.Result = v.Interface()
+ }
+}
+
+func (w *walker) replacePointerMaybe() {
+ // Determine the last pointer value. If it is NOT a pointer, then
+ // we need to push that onto the stack.
+ if !w.pointerPeek() {
+ w.valPush(reflect.Indirect(w.valPop()))
+ return
+ }
+
+ v := w.valPop()
+
+ // If the expected type is a pointer to an interface of any depth,
+ // such as *interface{}, **interface{}, etc., then we need to convert
+ // the value "v" from *CONCRETE to *interface{} so types match for
+ // Set.
+ //
+ // Example if v is type *Foo where Foo is a struct, v would become
+ // *interface{} instead. This only happens if we have an interface expectation
+ // at this depth.
+ //
+ // For more info, see GH-16
+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
+ y := reflect.New(iType) // Create *interface{}
+ y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
+ v = y // v is now typed *interface{} (where *v = Foo)
+ }
+
+ for i := 1; i < w.ps[w.depth]; i++ {
+ if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+ iface := reflect.New(iType).Elem()
+ iface.Set(v)
+ v = iface
+ }
+
+ p := reflect.New(v.Type())
+ p.Elem().Set(v)
+ v = p
+ }
+
+ w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+ if !w.useLocks {
+ return
+ }
+
+ if !v.IsValid() || !v.CanInterface() {
+ return
+ }
+
+ type rlocker interface {
+ RLocker() sync.Locker
+ }
+
+ var locker sync.Locker
+
+ // We can't call Interface() on a value directly, since that requires
+ // a copy. This is OK, since the pointer to a value which is a sync.Locker
+ // is also a sync.Locker.
+ if v.Kind() == reflect.Ptr {
+ switch l := v.Interface().(type) {
+ case rlocker:
+ // don't lock a mutex directly
+ if _, ok := l.(*sync.RWMutex); !ok {
+ locker = l.RLocker()
+ }
+ case sync.Locker:
+ locker = l
+ }
+ } else if v.CanAddr() {
+ switch l := v.Addr().Interface().(type) {
+ case rlocker:
+ // don't lock a mutex directly
+ if _, ok := l.(*sync.RWMutex); !ok {
+ locker = l.RLocker()
+ }
+ case sync.Locker:
+ locker = l
+ }
+ }
+
+ // still no callable locker
+ if locker == nil {
+ return
+ }
+
+ // don't lock a mutex directly
+ switch locker.(type) {
+ case *sync.Mutex, *sync.RWMutex:
+ return
+ }
+
+ locker.Lock()
+ w.locks[w.depth] = locker
+}
+
+// wrapPtr is a helper that takes v and always make it *v. copystructure
+// stores things internally as pointers until the last moment before unwrapping
+func wrapPtr(v reflect.Value) reflect.Value {
+ if !v.IsValid() {
+ return v
+ }
+ vPtr := reflect.New(v.Type())
+ vPtr.Elem().Set(v)
+ return vPtr
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE
new file mode 100644
index 000000000..f9c841a51
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md
new file mode 100644
index 000000000..ac82cd2e1
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/README.md
@@ -0,0 +1,6 @@
+# reflectwalk
+
+reflectwalk is a Go library for "walking" a value in Go using reflection,
+in the same way a directory tree can be "walked" on the filesystem. Walking
+a complex structure can allow you to do manipulations on unknown structures
+such as those decoded from JSON.
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
new file mode 100644
index 000000000..6a7f17611
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location.go
@@ -0,0 +1,19 @@
+package reflectwalk
+
+//go:generate stringer -type=Location location.go
+
+type Location uint
+
+const (
+ None Location = iota
+ Map
+ MapKey
+ MapValue
+ Slice
+ SliceElem
+ Array
+ ArrayElem
+ Struct
+ StructField
+ WalkLoc
+)
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
new file mode 100644
index 000000000..70760cf4c
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
+
+package reflectwalk
+
+import "fmt"
+
+const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"
+
+var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}
+
+func (i Location) String() string {
+ if i >= Location(len(_Location_index)-1) {
+ return fmt.Sprintf("Location(%d)", i)
+ }
+ return _Location_name[_Location_index[i]:_Location_index[i+1]]
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
new file mode 100644
index 000000000..d7ab7b6d7
--- /dev/null
+++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
@@ -0,0 +1,401 @@
+// reflectwalk is a package that allows you to "walk" complex structures
+// similar to how you may "walk" a filesystem: visiting every element one
+// by one and calling callback functions allowing you to handle and manipulate
+// those elements.
+package reflectwalk
+
+import (
+ "errors"
+ "reflect"
+)
+
+// PrimitiveWalker implementations are able to handle primitive values
+// within complex structures. Primitive values are numbers, strings,
+// booleans, funcs, chans.
+//
+// These primitive values are often members of more complex
+// structures (slices, maps, etc.) that are walkable by other interfaces.
+type PrimitiveWalker interface {
+ Primitive(reflect.Value) error
+}
+
+// InterfaceWalker implementations are able to handle interface values as they
+// are encountered during the walk.
+type InterfaceWalker interface {
+ Interface(reflect.Value) error
+}
+
+// MapWalker implementations are able to handle individual elements
+// found within a map structure.
+type MapWalker interface {
+ Map(m reflect.Value) error
+ MapElem(m, k, v reflect.Value) error
+}
+
+// SliceWalker implementations are able to handle slice elements found
+// within complex structures.
+type SliceWalker interface {
+ Slice(reflect.Value) error
+ SliceElem(int, reflect.Value) error
+}
+
+// ArrayWalker implementations are able to handle array elements found
+// within complex structures.
+type ArrayWalker interface {
+ Array(reflect.Value) error
+ ArrayElem(int, reflect.Value) error
+}
+
+// StructWalker is an interface that has methods that are called for
+// structs when a Walk is done.
+type StructWalker interface {
+ Struct(reflect.Value) error
+ StructField(reflect.StructField, reflect.Value) error
+}
+
+// EnterExitWalker implementations are notified before and after
+// they walk deeper into complex structures (into struct fields,
+// into slice elements, etc.)
+type EnterExitWalker interface {
+ Enter(Location) error
+ Exit(Location) error
+}
+
+// PointerWalker implementations are notified when the value they're
+// walking is a pointer or not. Pointer is called for _every_ value whether
+// it is a pointer or not.
+type PointerWalker interface {
+ PointerEnter(bool) error
+ PointerExit(bool) error
+}
+
+// SkipEntry can be returned from walk functions to skip walking
+// the value of this field. This is only valid in the following functions:
+//
+// - Struct: skips all fields from being walked
+// - StructField: skips walking the struct value
+//
+var SkipEntry = errors.New("skip this entry")
+
+// Walk takes an arbitrary value and an interface and traverses the
+// value, calling callbacks on the interface if they are supported.
+// The interface should implement one or more of the walker interfaces
+// in this package, such as PrimitiveWalker, StructWalker, etc.
+func Walk(data, walker interface{}) (err error) {
+ v := reflect.ValueOf(data)
+ ew, ok := walker.(EnterExitWalker)
+ if ok {
+ err = ew.Enter(WalkLoc)
+ }
+
+ if err == nil {
+ err = walk(v, walker)
+ }
+
+ if ok && err == nil {
+ err = ew.Exit(WalkLoc)
+ }
+
+ return
+}
+
+func walk(v reflect.Value, w interface{}) (err error) {
+ // Determine if we're receiving a pointer and if so notify the walker.
+ // The logic here is convoluted but very important (tests will fail if
+ // almost any part is changed). I will try to explain here.
+ //
+ // First, we check if the value is an interface, if so, we really need
+ // to check the interface's VALUE to see whether it is a pointer.
+ //
+ // Check whether the value is then a pointer. If so, then set pointer
+ // to true to notify the user.
+ //
+ // If we still have a pointer or an interface after the indirections, then
+ // we unwrap another level
+ //
+ // At this time, we also set "v" to be the dereferenced value. This is
+ // because once we've unwrapped the pointer we want to use that value.
+ pointer := false
+ pointerV := v
+
+ for {
+ if pointerV.Kind() == reflect.Interface {
+ if iw, ok := w.(InterfaceWalker); ok {
+ if err = iw.Interface(pointerV); err != nil {
+ return
+ }
+ }
+
+ pointerV = pointerV.Elem()
+ }
+
+ if pointerV.Kind() == reflect.Ptr {
+ pointer = true
+ v = reflect.Indirect(pointerV)
+ }
+ if pw, ok := w.(PointerWalker); ok {
+ if err = pw.PointerEnter(pointer); err != nil {
+ return
+ }
+
+ defer func(pointer bool) {
+ if err != nil {
+ return
+ }
+
+ err = pw.PointerExit(pointer)
+ }(pointer)
+ }
+
+ if pointer {
+ pointerV = v
+ }
+ pointer = false
+
+ // If we still have a pointer or interface we have to indirect another level.
+ switch pointerV.Kind() {
+ case reflect.Ptr, reflect.Interface:
+ continue
+ }
+ break
+ }
+
+ // We preserve the original value here because if it is an interface
+ // type, we want to pass that directly into the walkPrimitive, so that
+ // we can set it.
+ originalV := v
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ }
+
+ k := v.Kind()
+ if k >= reflect.Int && k <= reflect.Complex128 {
+ k = reflect.Int
+ }
+
+ switch k {
+ // Primitives
+ case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
+ err = walkPrimitive(originalV, w)
+ return
+ case reflect.Map:
+ err = walkMap(v, w)
+ return
+ case reflect.Slice:
+ err = walkSlice(v, w)
+ return
+ case reflect.Struct:
+ err = walkStruct(v, w)
+ return
+ case reflect.Array:
+ err = walkArray(v, w)
+ return
+ default:
+ panic("unsupported type: " + k.String())
+ }
+}
+
+func walkMap(v reflect.Value, w interface{}) error {
+ ew, ewok := w.(EnterExitWalker)
+ if ewok {
+ ew.Enter(Map)
+ }
+
+ if mw, ok := w.(MapWalker); ok {
+ if err := mw.Map(v); err != nil {
+ return err
+ }
+ }
+
+ for _, k := range v.MapKeys() {
+ kv := v.MapIndex(k)
+
+ if mw, ok := w.(MapWalker); ok {
+ if err := mw.MapElem(v, k, kv); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(MapKey)
+ }
+
+ if err := walk(k, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(MapKey)
+ ew.Enter(MapValue)
+ }
+
+ if err := walk(kv, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(MapValue)
+ }
+ }
+
+ if ewok {
+ ew.Exit(Map)
+ }
+
+ return nil
+}
+
+func walkPrimitive(v reflect.Value, w interface{}) error {
+ if pw, ok := w.(PrimitiveWalker); ok {
+ return pw.Primitive(v)
+ }
+
+ return nil
+}
+
+func walkSlice(v reflect.Value, w interface{}) (err error) {
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(Slice)
+ }
+
+ if sw, ok := w.(SliceWalker); ok {
+ if err := sw.Slice(v); err != nil {
+ return err
+ }
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+
+ if sw, ok := w.(SliceWalker); ok {
+ if err := sw.SliceElem(i, elem); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(SliceElem)
+ }
+
+ if err := walk(elem, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(SliceElem)
+ }
+ }
+
+ ew, ok = w.(EnterExitWalker)
+ if ok {
+ ew.Exit(Slice)
+ }
+
+ return nil
+}
+
+func walkArray(v reflect.Value, w interface{}) (err error) {
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(Array)
+ }
+
+ if aw, ok := w.(ArrayWalker); ok {
+ if err := aw.Array(v); err != nil {
+ return err
+ }
+ }
+
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+
+ if aw, ok := w.(ArrayWalker); ok {
+ if err := aw.ArrayElem(i, elem); err != nil {
+ return err
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(ArrayElem)
+ }
+
+ if err := walk(elem, w); err != nil {
+ return err
+ }
+
+ if ok {
+ ew.Exit(ArrayElem)
+ }
+ }
+
+ ew, ok = w.(EnterExitWalker)
+ if ok {
+ ew.Exit(Array)
+ }
+
+ return nil
+}
+
+func walkStruct(v reflect.Value, w interface{}) (err error) {
+ ew, ewok := w.(EnterExitWalker)
+ if ewok {
+ ew.Enter(Struct)
+ }
+
+ skip := false
+ if sw, ok := w.(StructWalker); ok {
+ err = sw.Struct(v)
+ if err == SkipEntry {
+ skip = true
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+
+ if !skip {
+ vt := v.Type()
+ for i := 0; i < vt.NumField(); i++ {
+ sf := vt.Field(i)
+ f := v.FieldByIndex([]int{i})
+
+ if sw, ok := w.(StructWalker); ok {
+ err = sw.StructField(sf, f)
+
+ // SkipEntry just pretends this field doesn't even exist
+ if err == SkipEntry {
+ continue
+ }
+
+ if err != nil {
+ return
+ }
+ }
+
+ ew, ok := w.(EnterExitWalker)
+ if ok {
+ ew.Enter(StructField)
+ }
+
+ err = walk(f, w)
+ if err != nil {
+ return
+ }
+
+ if ok {
+ ew.Exit(StructField)
+ }
+ }
+ }
+
+ if ewok {
+ ew.Exit(Struct)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/schollz/progressbar/v3/.golangci.yml b/vendor/github.com/schollz/progressbar/v3/.golangci.yml
new file mode 100644
index 000000000..8c45095d2
--- /dev/null
+++ b/vendor/github.com/schollz/progressbar/v3/.golangci.yml
@@ -0,0 +1,21 @@
+run:
+ timeout: 5m
+ exclude-dirs:
+ - vendor
+ - examples
+
+linters:
+ enable:
+ - errcheck
+ - gocyclo
+ - gofmt
+ - goimports
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+linters-settings:
+ gocyclo:
+ min-complexity: 20
\ No newline at end of file
diff --git a/vendor/github.com/schollz/progressbar/v3/progressbar.go b/vendor/github.com/schollz/progressbar/v3/progressbar.go
index cef46294d..e3bd67b31 100644
--- a/vendor/github.com/schollz/progressbar/v3/progressbar.go
+++ b/vendor/github.com/schollz/progressbar/v3/progressbar.go
@@ -494,7 +494,13 @@ func (p *ProgressBar) Reset() {
// Finish will fill the bar to full
func (p *ProgressBar) Finish() error {
- return p.Set64(p.config.max)
+ p.lock.Lock()
+ p.state.currentNum = p.config.max
+ if !p.config.ignoreLength {
+ p.state.currentBytes = float64(p.config.max)
+ }
+ p.lock.Unlock()
+ return p.Add(0)
}
// Exit will exit the bar to keep current state
diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore
new file mode 100644
index 000000000..8a43ce9d7
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/.gitignore
@@ -0,0 +1,6 @@
+.git
+*.swp
+
+# IntelliJ
+.idea/
+*.iml
diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml
new file mode 100644
index 000000000..55d42b289
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+ - 1.7.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+install:
+ - go build .
+
+script:
+ - go test -v
diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md
new file mode 100644
index 000000000..01ba02feb
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md
@@ -0,0 +1,19 @@
+## Decimal v1.2.0
+
+#### BREAKING
+- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172)
+
+#### FEATURES
+- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72)
+- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157)
+- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171)
+
+#### ENHANCEMENTS
+- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160)
+- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156)
+- Update documentation [#173](https://github.com/shopspring/decimal/pull/173)
+- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174)
+
+#### BUGFIXES
+- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159)
+- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166)
diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE
new file mode 100644
index 000000000..ad2148aaf
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/LICENSE
@@ -0,0 +1,45 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Spring, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+- Based on https://github.com/oguzbilgic/fpd, which has the following license:
+"""
+The MIT License (MIT)
+
+Copyright (c) 2013 Oguz Bilgic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+"""
diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md
new file mode 100644
index 000000000..b70f90159
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/README.md
@@ -0,0 +1,130 @@
+# decimal
+
+[![Build Status](https://travis-ci.org/shopspring/decimal.png?branch=master)](https://travis-ci.org/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal)
+
+Arbitrary-precision fixed-point decimal numbers in go.
+
+_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point.
+
+## Features
+
+ * The zero-value is 0, and is safe to use without initialization
+ * Addition, subtraction, multiplication with no loss of precision
+ * Division with specified precision
+ * Database/sql serialization/deserialization
+ * JSON and XML serialization/deserialization
+
+## Install
+
+Run `go get github.com/shopspring/decimal`
+
+## Requirements
+
+Decimal library requires Go version `>=1.7`
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/shopspring/decimal"
+)
+
+func main() {
+ price, err := decimal.NewFromString("136.02")
+ if err != nil {
+ panic(err)
+ }
+
+ quantity := decimal.NewFromInt(3)
+
+ fee, _ := decimal.NewFromString(".035")
+ taxRate, _ := decimal.NewFromString(".08875")
+
+ subtotal := price.Mul(quantity)
+
+ preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1)))
+
+ total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1)))
+
+ fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06
+ fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421
+ fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375
+ fmt.Println("Total:", total) // Total: 459.824961375
+ fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875
+}
+```
+
+## Documentation
+
+http://godoc.org/github.com/shopspring/decimal
+
+## Production Usage
+
+* [Spring](https://shopspring.com/), since August 14, 2014.
+* If you are using this in production, please let us know!
+
+## FAQ
+
+#### Why don't you just use float64?
+
+Because float64 (or any binary floating point type, actually) can't represent
+numbers such as `0.1` exactly.
+
+Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that
+it prints out `10`, but it actually prints `9.999999999999831`. Over time,
+these small errors can really add up!
+
+#### Why don't you just use big.Rat?
+
+big.Rat is fine for representing rational numbers, but Decimal is better for
+representing money. Why? Here's a (contrived) example:
+
+Let's say you use big.Rat, and you have two numbers, x and y, both
+representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one
+out, the string output has to stop somewhere (let's say it stops at 3 decimal
+digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did
+the other 0.001 go?
+
+Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE
+
+With Decimal, the strings being printed out represent the number exactly. So,
+if you have `x = y = 1/3` (with precision 3), they will actually be equal to
+0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is
+unaccounted for!
+
+You still have to be careful. If you want to split a number `N` 3 ways, you
+can't just send `N/3` to three different people. You have to pick one to send
+`N - (2/3*N)` to. That person will receive the fraction of a penny remainder.
+
+But, it is much easier to be careful with Decimal than with big.Rat.
+
+#### Why isn't the API similar to big.Int's?
+
+big.Int's API is built to reduce the number of memory allocations for maximal
+performance. This makes sense for its use-case, but the trade-off is that the
+API is awkward and easy to misuse.
+
+For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A
+developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This
+modifies `a` and sets `z` as an alias for `a`, which they might not expect. It
+also modifies any other aliases to `a`.
+
+Here's an example of the subtle bugs you can introduce with big.Int's API:
+https://play.golang.org/p/x2R_78pa8r
+
+In contrast, it's difficult to make such mistakes with decimal. Decimals
+behave like other go numbers types: even though `a = b` will not deep copy
+`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods
+return new Decimals and do not modify the originals. The downside is that
+this causes extra allocations, so Decimal is less performant. My assumption
+is that if you're using Decimals, you probably care more about correctness
+than performance.
+
+## License
+
+The MIT License (MIT)
+
+This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License.
diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go
new file mode 100644
index 000000000..9958d6902
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/decimal-go.go
@@ -0,0 +1,415 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+package decimal
+
+type decimal struct {
+ d [800]byte // digits, big-endian representation
+ nd int // number of digits used
+ dp int // decimal point
+ neg bool // negative flag
+ trunc bool // discarded nonzero digits beyond d[:nd]
+}
+
+func (a *decimal) String() string {
+ n := 10 + a.nd
+ if a.dp > 0 {
+ n += a.dp
+ }
+ if a.dp < 0 {
+ n += -a.dp
+ }
+
+ buf := make([]byte, n)
+ w := 0
+ switch {
+ case a.nd == 0:
+ return "0"
+
+ case a.dp <= 0:
+ // zeros fill space between decimal point and digits
+ buf[w] = '0'
+ w++
+ buf[w] = '.'
+ w++
+ w += digitZero(buf[w : w+-a.dp])
+ w += copy(buf[w:], a.d[0:a.nd])
+
+ case a.dp < a.nd:
+ // decimal point in middle of digits
+ w += copy(buf[w:], a.d[0:a.dp])
+ buf[w] = '.'
+ w++
+ w += copy(buf[w:], a.d[a.dp:a.nd])
+
+ default:
+ // zeros fill space between digits and decimal point
+ w += copy(buf[w:], a.d[0:a.nd])
+ w += digitZero(buf[w : w+a.dp-a.nd])
+ }
+ return string(buf[0:w])
+}
+
+func digitZero(dst []byte) int {
+ for i := range dst {
+ dst[i] = '0'
+ }
+ return len(dst)
+}
+
+// trim trailing zeros from number.
+// (They are meaningless; the decimal point is tracked
+// independent of the number of digits.)
+func trim(a *decimal) {
+ for a.nd > 0 && a.d[a.nd-1] == '0' {
+ a.nd--
+ }
+ if a.nd == 0 {
+ a.dp = 0
+ }
+}
+
+// Assign v to a.
+func (a *decimal) Assign(v uint64) {
+ var buf [24]byte
+
+ // Write reversed decimal in buf.
+ n := 0
+ for v > 0 {
+ v1 := v / 10
+ v -= 10 * v1
+ buf[n] = byte(v + '0')
+ n++
+ v = v1
+ }
+
+ // Reverse again to produce forward decimal in a.d.
+ a.nd = 0
+ for n--; n >= 0; n-- {
+ a.d[a.nd] = buf[n]
+ a.nd++
+ }
+ a.dp = a.nd
+ trim(a)
+}
+
+// Maximum shift that we can do in one pass without overflow.
+// A uint has 32 or 64 bits, and we have to be able to accommodate 9<> 63)
+const maxShift = uintSize - 4
+
+// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
+func rightShift(a *decimal, k uint) {
+ r := 0 // read pointer
+ w := 0 // write pointer
+
+ // Pick up enough leading digits to cover first shift.
+ var n uint
+ for ; n>>k == 0; r++ {
+ if r >= a.nd {
+ if n == 0 {
+ // a == 0; shouldn't get here, but handle anyway.
+ a.nd = 0
+ return
+ }
+ for n>>k == 0 {
+ n = n * 10
+ r++
+ }
+ break
+ }
+ c := uint(a.d[r])
+ n = n*10 + c - '0'
+ }
+ a.dp -= r - 1
+
+ var mask uint = (1 << k) - 1
+
+ // Pick up a digit, put down a digit.
+ for ; r < a.nd; r++ {
+ c := uint(a.d[r])
+ dig := n >> k
+ n &= mask
+ a.d[w] = byte(dig + '0')
+ w++
+ n = n*10 + c - '0'
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ dig := n >> k
+ n &= mask
+ if w < len(a.d) {
+ a.d[w] = byte(dig + '0')
+ w++
+ } else if dig > 0 {
+ a.trunc = true
+ }
+ n = n * 10
+ }
+
+ a.nd = w
+ trim(a)
+}
+
+// Cheat sheet for left shift: table indexed by shift count giving
+// number of new digits that will be introduced by that shift.
+//
+// For example, leftcheats[4] = {2, "625"}. That means that
+// if we are shifting by 4 (multiplying by 16), it will add 2 digits
+// when the string prefix is "625" through "999", and one fewer digit
+// if the string prefix is "000" through "624".
+//
+// Credit for this trick goes to Ken.
+
+type leftCheat struct {
+ delta int // number of new digits
+ cutoff string // minus one digit if original < a.
+}
+
+var leftcheats = []leftCheat{
+ // Leading digits of 1/2^i = 5^i.
+ // 5^23 is not an exact 64-bit floating point number,
+ // so have to use bc for the math.
+ // Go up to 60 to be large enough for 32bit and 64bit platforms.
+ /*
+ seq 60 | sed 's/^/5^/' | bc |
+ awk 'BEGIN{ print "\t{ 0, \"\" }," }
+ {
+ log2 = log(2)/log(10)
+ printf("\t{ %d, \"%s\" },\t// * %d\n",
+ int(log2*NR+1), $0, 2**NR)
+ }'
+ */
+ {0, ""},
+ {1, "5"}, // * 2
+ {1, "25"}, // * 4
+ {1, "125"}, // * 8
+ {2, "625"}, // * 16
+ {2, "3125"}, // * 32
+ {2, "15625"}, // * 64
+ {3, "78125"}, // * 128
+ {3, "390625"}, // * 256
+ {3, "1953125"}, // * 512
+ {4, "9765625"}, // * 1024
+ {4, "48828125"}, // * 2048
+ {4, "244140625"}, // * 4096
+ {4, "1220703125"}, // * 8192
+ {5, "6103515625"}, // * 16384
+ {5, "30517578125"}, // * 32768
+ {5, "152587890625"}, // * 65536
+ {6, "762939453125"}, // * 131072
+ {6, "3814697265625"}, // * 262144
+ {6, "19073486328125"}, // * 524288
+ {7, "95367431640625"}, // * 1048576
+ {7, "476837158203125"}, // * 2097152
+ {7, "2384185791015625"}, // * 4194304
+ {7, "11920928955078125"}, // * 8388608
+ {8, "59604644775390625"}, // * 16777216
+ {8, "298023223876953125"}, // * 33554432
+ {8, "1490116119384765625"}, // * 67108864
+ {9, "7450580596923828125"}, // * 134217728
+ {9, "37252902984619140625"}, // * 268435456
+ {9, "186264514923095703125"}, // * 536870912
+ {10, "931322574615478515625"}, // * 1073741824
+ {10, "4656612873077392578125"}, // * 2147483648
+ {10, "23283064365386962890625"}, // * 4294967296
+ {10, "116415321826934814453125"}, // * 8589934592
+ {11, "582076609134674072265625"}, // * 17179869184
+ {11, "2910383045673370361328125"}, // * 34359738368
+ {11, "14551915228366851806640625"}, // * 68719476736
+ {12, "72759576141834259033203125"}, // * 137438953472
+ {12, "363797880709171295166015625"}, // * 274877906944
+ {12, "1818989403545856475830078125"}, // * 549755813888
+ {13, "9094947017729282379150390625"}, // * 1099511627776
+ {13, "45474735088646411895751953125"}, // * 2199023255552
+ {13, "227373675443232059478759765625"}, // * 4398046511104
+ {13, "1136868377216160297393798828125"}, // * 8796093022208
+ {14, "5684341886080801486968994140625"}, // * 17592186044416
+ {14, "28421709430404007434844970703125"}, // * 35184372088832
+ {14, "142108547152020037174224853515625"}, // * 70368744177664
+ {15, "710542735760100185871124267578125"}, // * 140737488355328
+ {15, "3552713678800500929355621337890625"}, // * 281474976710656
+ {15, "17763568394002504646778106689453125"}, // * 562949953421312
+ {16, "88817841970012523233890533447265625"}, // * 1125899906842624
+ {16, "444089209850062616169452667236328125"}, // * 2251799813685248
+ {16, "2220446049250313080847263336181640625"}, // * 4503599627370496
+ {16, "11102230246251565404236316680908203125"}, // * 9007199254740992
+ {17, "55511151231257827021181583404541015625"}, // * 18014398509481984
+ {17, "277555756156289135105907917022705078125"}, // * 36028797018963968
+ {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936
+ {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872
+ {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744
+ {18, "173472347597680709441192448139190673828125"}, // * 576460752303423488
+ {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976
+}
+
+// Is the leading prefix of b lexicographically less than s?
+func prefixIsLessThan(b []byte, s string) bool {
+ for i := 0; i < len(s); i++ {
+ if i >= len(b) {
+ return true
+ }
+ if b[i] != s[i] {
+ return b[i] < s[i]
+ }
+ }
+ return false
+}
+
+// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow.
+func leftShift(a *decimal, k uint) {
+ delta := leftcheats[k].delta
+ if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) {
+ delta--
+ }
+
+ r := a.nd // read index
+ w := a.nd + delta // write index
+
+ // Pick up a digit, put down a digit.
+ var n uint
+ for r--; r >= 0; r-- {
+ n += (uint(a.d[r]) - '0') << k
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ // Put down extra digits.
+ for n > 0 {
+ quo := n / 10
+ rem := n - 10*quo
+ w--
+ if w < len(a.d) {
+ a.d[w] = byte(rem + '0')
+ } else if rem != 0 {
+ a.trunc = true
+ }
+ n = quo
+ }
+
+ a.nd += delta
+ if a.nd >= len(a.d) {
+ a.nd = len(a.d)
+ }
+ a.dp += delta
+ trim(a)
+}
+
+// Binary shift left (k > 0) or right (k < 0).
+func (a *decimal) Shift(k int) {
+ switch {
+ case a.nd == 0:
+ // nothing to do: a == 0
+ case k > 0:
+ for k > maxShift {
+ leftShift(a, maxShift)
+ k -= maxShift
+ }
+ leftShift(a, uint(k))
+ case k < 0:
+ for k < -maxShift {
+ rightShift(a, maxShift)
+ k += maxShift
+ }
+ rightShift(a, uint(-k))
+ }
+}
+
+// If we chop a at nd digits, should we round up?
+func shouldRoundUp(a *decimal, nd int) bool {
+ if nd < 0 || nd >= a.nd {
+ return false
+ }
+ if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even
+ // if we truncated, a little higher than what's recorded - always round up
+ if a.trunc {
+ return true
+ }
+ return nd > 0 && (a.d[nd-1]-'0')%2 != 0
+ }
+ // not halfway - digit tells all
+ return a.d[nd] >= '5'
+}
+
+// Round a to nd digits (or fewer).
+// If nd is zero, it means we're rounding
+// just to the left of the digits, as in
+// 0.09 -> 0.1.
+func (a *decimal) Round(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ if shouldRoundUp(a, nd) {
+ a.RoundUp(nd)
+ } else {
+ a.RoundDown(nd)
+ }
+}
+
+// Round a down to nd digits (or fewer).
+func (a *decimal) RoundDown(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+ a.nd = nd
+ trim(a)
+}
+
+// Round a up to nd digits (or fewer).
+func (a *decimal) RoundUp(nd int) {
+ if nd < 0 || nd >= a.nd {
+ return
+ }
+
+ // round up
+ for i := nd - 1; i >= 0; i-- {
+ c := a.d[i]
+ if c < '9' { // can stop after this digit
+ a.d[i]++
+ a.nd = i + 1
+ return
+ }
+ }
+
+ // Number is all 9s.
+ // Change to single 1 with adjusted decimal point.
+ a.d[0] = '1'
+ a.nd = 1
+ a.dp++
+}
+
+// Extract integer part, rounded appropriately.
+// No guarantees about overflow.
+func (a *decimal) RoundedInteger() uint64 {
+ if a.dp > 20 {
+ return 0xFFFFFFFFFFFFFFFF
+ }
+ var i int
+ n := uint64(0)
+ for i = 0; i < a.dp && i < a.nd; i++ {
+ n = n*10 + uint64(a.d[i]-'0')
+ }
+ for ; i < a.dp; i++ {
+ n *= 10
+ }
+ if shouldRoundUp(a, a.dp) {
+ n++
+ }
+ return n
+}
diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go
new file mode 100644
index 000000000..801c1a045
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/decimal.go
@@ -0,0 +1,1477 @@
+// Package decimal implements an arbitrary precision fixed-point decimal.
+//
+// The zero-value of a Decimal is 0, as you would expect.
+//
+// The best way to create a new Decimal is to use decimal.NewFromString, ex:
+//
+// n, err := decimal.NewFromString("-123.4567")
+// n.String() // output: "-123.4567"
+//
+// To use Decimal as part of a struct:
+//
+// type Struct struct {
+// Number Decimal
+// }
+//
+// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point.
+package decimal
+
+import (
+ "database/sql/driver"
+ "encoding/binary"
+ "fmt"
+ "math"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+// DivisionPrecision is the number of decimal places in the result when it
+// doesn't divide exactly.
+//
+// Example:
+//
+// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
+// d1.String() // output: "0.6666666666666667"
+// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000))
+// d2.String() // output: "0.0000666666666667"
+// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3))
+// d3.String() // output: "6666.6666666666666667"
+// decimal.DivisionPrecision = 3
+// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3))
+// d4.String() // output: "0.667"
+//
+var DivisionPrecision = 16
+
+// MarshalJSONWithoutQuotes should be set to true if you want the decimal to
+// be JSON marshaled as a number, instead of as a string.
+// WARNING: this is dangerous for decimals with many digits, since many JSON
+// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754
+// double-precision floating point numbers, which means you can potentially
+// silently lose precision.
+var MarshalJSONWithoutQuotes = false
+
+// Zero constant, to make computations faster.
+// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead.
+var Zero = New(0, 1)
+
+var zeroInt = big.NewInt(0)
+var oneInt = big.NewInt(1)
+var twoInt = big.NewInt(2)
+var fourInt = big.NewInt(4)
+var fiveInt = big.NewInt(5)
+var tenInt = big.NewInt(10)
+var twentyInt = big.NewInt(20)
+
+// Decimal represents a fixed-point decimal. It is immutable.
+// number = value * 10 ^ exp
+type Decimal struct {
+ value *big.Int
+
+ // NOTE(vadim): this must be an int32, because we cast it to float64 during
+ // calculations. If exp is 64 bit, we might lose precision.
+ // If we cared about being able to represent every possible decimal, we
+ // could make exp a *big.Int but it would hurt performance and numbers
+ // like that are unrealistic.
+ exp int32
+}
+
+// New returns a new fixed-point decimal, value * 10 ^ exp.
+func New(value int64, exp int32) Decimal {
+ return Decimal{
+ value: big.NewInt(value),
+ exp: exp,
+ }
+}
+
+// NewFromInt converts a int64 to Decimal.
+//
+// Example:
+//
+// NewFromInt(123).String() // output: "123"
+// NewFromInt(-10).String() // output: "-10"
+func NewFromInt(value int64) Decimal {
+ return Decimal{
+ value: big.NewInt(value),
+ exp: 0,
+ }
+}
+
+// NewFromInt32 converts a int32 to Decimal.
+//
+// Example:
+//
+// NewFromInt(123).String() // output: "123"
+// NewFromInt(-10).String() // output: "-10"
+func NewFromInt32(value int32) Decimal {
+ return Decimal{
+ value: big.NewInt(int64(value)),
+ exp: 0,
+ }
+}
+
+// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp
+func NewFromBigInt(value *big.Int, exp int32) Decimal {
+ return Decimal{
+ value: big.NewInt(0).Set(value),
+ exp: exp,
+ }
+}
+
+// NewFromString returns a new Decimal from a string representation.
+// Trailing zeroes are not trimmed.
+//
+// Example:
+//
+// d, err := NewFromString("-123.45")
+// d2, err := NewFromString(".0001")
+// d3, err := NewFromString("1.47000")
+//
+func NewFromString(value string) (Decimal, error) {
+ originalInput := value
+ var intString string
+ var exp int64
+
+ // Check if number is using scientific notation
+ eIndex := strings.IndexAny(value, "Ee")
+ if eIndex != -1 {
+ expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32)
+ if err != nil {
+ if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value)
+ }
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value)
+ }
+ value = value[:eIndex]
+ exp = expInt
+ }
+
+ parts := strings.Split(value, ".")
+ if len(parts) == 1 {
+ // There is no decimal point, we can just parse the original string as
+ // an int
+ intString = value
+ } else if len(parts) == 2 {
+ intString = parts[0] + parts[1]
+ expInt := -len(parts[1])
+ exp += int64(expInt)
+ } else {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value)
+ }
+
+ dValue := new(big.Int)
+ _, ok := dValue.SetString(intString, 10)
+ if !ok {
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal", value)
+ }
+
+ if exp < math.MinInt32 || exp > math.MaxInt32 {
+ // NOTE(vadim): I doubt a string could realistically be this long
+ return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", originalInput)
+ }
+
+ return Decimal{
+ value: dValue,
+ exp: int32(exp),
+ }, nil
+}
+
+// RequireFromString returns a new Decimal from a string representation
+// or panics if NewFromString would have returned an error.
+//
+// Example:
+//
+// d := RequireFromString("-123.45")
+// d2 := RequireFromString(".0001")
+//
+func RequireFromString(value string) Decimal {
+ dec, err := NewFromString(value)
+ if err != nil {
+ panic(err)
+ }
+ return dec
+}
+
+// NewFromFloat converts a float64 to Decimal.
+//
+// The converted number will contain the number of significant digits that can be
+// represented in a float with reliable roundtrip.
+// This is typically 15 digits, but may be more in some cases.
+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information.
+//
+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms.
+//
+// NOTE: this will panic on NaN, +/-inf
+func NewFromFloat(value float64) Decimal {
+ if value == 0 {
+ return New(0, 0)
+ }
+ return newFromFloat(value, math.Float64bits(value), &float64info)
+}
+
+// NewFromFloat32 converts a float32 to Decimal.
+//
+// The converted number will contain the number of significant digits that can be
+// represented in a float with reliable roundtrip.
+// This is typically 6-8 digits depending on the input.
+// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information.
+//
+// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms.
+//
+// NOTE: this will panic on NaN, +/-inf
+func NewFromFloat32(value float32) Decimal {
+ if value == 0 {
+ return New(0, 0)
+ }
+ // XOR is workaround for https://github.com/golang/go/issues/26285
+ a := math.Float32bits(value) ^ 0x80808080
+ return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info)
+}
+
+func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal {
+ if math.IsNaN(val) || math.IsInf(val, 0) {
+ panic(fmt.Sprintf("Cannot create a Decimal from %v", val))
+ }
+	exp := int(bits >> flt.mantbits & (1<<flt.expbits - 1))
+	mant := bits & (uint64(1)<<flt.mantbits - 1)
+
+	switch exp {
+	case 0:
+		// denormalized
+		exp++
+	default:
+		// add implicit top bit
+		mant |= uint64(1) << flt.mantbits
+	}
+	exp += flt.bias
+
+	var d decimal
+	d.Assign(mant)
+	d.Shift(exp - int(flt.mantbits))
+	d.neg = bits>>(flt.expbits+flt.mantbits) != 0
+
+ roundShortest(&d, mant, exp, flt)
+ // If less than 19 digits, we can do calculation in an int64.
+ if d.nd < 19 {
+ tmp := int64(0)
+ m := int64(1)
+ for i := d.nd - 1; i >= 0; i-- {
+ tmp += m * int64(d.d[i]-'0')
+ m *= 10
+ }
+ if d.neg {
+ tmp *= -1
+ }
+ return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)}
+ }
+ dValue := new(big.Int)
+ dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10)
+ if ok {
+ return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)}
+ }
+
+ return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd))
+}
+
+// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary
+// number of fractional digits.
+//
+// Example:
+//
+// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46"
+//
+func NewFromFloatWithExponent(value float64, exp int32) Decimal {
+ if math.IsNaN(value) || math.IsInf(value, 0) {
+ panic(fmt.Sprintf("Cannot create a Decimal from %v", value))
+ }
+
+ bits := math.Float64bits(value)
+ mant := bits & (1<<52 - 1)
+ exp2 := int32((bits >> 52) & (1<<11 - 1))
+ sign := bits >> 63
+
+ if exp2 == 0 {
+ // specials
+ if mant == 0 {
+ return Decimal{}
+ }
+ // subnormal
+ exp2++
+ } else {
+ // normal
+ mant |= 1 << 52
+ }
+
+ exp2 -= 1023 + 52
+
+ // normalizing base-2 values
+ for mant&1 == 0 {
+ mant = mant >> 1
+ exp2++
+ }
+
+ // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0
+ if exp < 0 && exp < exp2 {
+ if exp2 < 0 {
+ exp = exp2
+ } else {
+ exp = 0
+ }
+ }
+
+ // representing 10^M * 2^N as 5^M * 2^(M+N)
+ exp2 -= exp
+
+ temp := big.NewInt(1)
+ dMant := big.NewInt(int64(mant))
+
+ // applying 5^M
+ if exp > 0 {
+ temp = temp.SetInt64(int64(exp))
+ temp = temp.Exp(fiveInt, temp, nil)
+ } else if exp < 0 {
+ temp = temp.SetInt64(-int64(exp))
+ temp = temp.Exp(fiveInt, temp, nil)
+ dMant = dMant.Mul(dMant, temp)
+ temp = temp.SetUint64(1)
+ }
+
+ // applying 2^(M+N)
+ if exp2 > 0 {
+ dMant = dMant.Lsh(dMant, uint(exp2))
+ } else if exp2 < 0 {
+ temp = temp.Lsh(temp, uint(-exp2))
+ }
+
+ // rounding and downscaling
+ if exp > 0 || exp2 < 0 {
+ halfDown := new(big.Int).Rsh(temp, 1)
+ dMant = dMant.Add(dMant, halfDown)
+ dMant = dMant.Quo(dMant, temp)
+ }
+
+ if sign == 1 {
+ dMant = dMant.Neg(dMant)
+ }
+
+ return Decimal{
+ value: dMant,
+ exp: exp,
+ }
+}
+
+// rescale returns a rescaled version of the decimal. Returned
+// decimal may be less precise if the given exponent is bigger
+// than the initial exponent of the Decimal.
+// NOTE: this will truncate, NOT round
+//
+// Example:
+//
+// d := New(12345, -4)
+// d2 := d.rescale(-1)
+// d3 := d2.rescale(-4)
+// println(d1)
+// println(d2)
+// println(d3)
+//
+// Output:
+//
+// 1.2345
+// 1.2
+// 1.2000
+//
+func (d Decimal) rescale(exp int32) Decimal {
+ d.ensureInitialized()
+
+ if d.exp == exp {
+ return Decimal{
+ new(big.Int).Set(d.value),
+ d.exp,
+ }
+ }
+
+ // NOTE(vadim): must convert exps to float64 before - to prevent overflow
+ diff := math.Abs(float64(exp) - float64(d.exp))
+ value := new(big.Int).Set(d.value)
+
+ expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil)
+ if exp > d.exp {
+ value = value.Quo(value, expScale)
+ } else if exp < d.exp {
+ value = value.Mul(value, expScale)
+ }
+
+ return Decimal{
+ value: value,
+ exp: exp,
+ }
+}
+
+// Abs returns the absolute value of the decimal.
+func (d Decimal) Abs() Decimal {
+ d.ensureInitialized()
+ d2Value := new(big.Int).Abs(d.value)
+ return Decimal{
+ value: d2Value,
+ exp: d.exp,
+ }
+}
+
+// Add returns d + d2.
+func (d Decimal) Add(d2 Decimal) Decimal {
+ rd, rd2 := RescalePair(d, d2)
+
+ d3Value := new(big.Int).Add(rd.value, rd2.value)
+ return Decimal{
+ value: d3Value,
+ exp: rd.exp,
+ }
+}
+
+// Sub returns d - d2.
+func (d Decimal) Sub(d2 Decimal) Decimal {
+ rd, rd2 := RescalePair(d, d2)
+
+ d3Value := new(big.Int).Sub(rd.value, rd2.value)
+ return Decimal{
+ value: d3Value,
+ exp: rd.exp,
+ }
+}
+
+// Neg returns -d.
+func (d Decimal) Neg() Decimal {
+ d.ensureInitialized()
+ val := new(big.Int).Neg(d.value)
+ return Decimal{
+ value: val,
+ exp: d.exp,
+ }
+}
+
+// Mul returns d * d2.
+func (d Decimal) Mul(d2 Decimal) Decimal {
+ d.ensureInitialized()
+ d2.ensureInitialized()
+
+ expInt64 := int64(d.exp) + int64(d2.exp)
+ if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 {
+ // NOTE(vadim): better to panic than give incorrect results, as
+ // Decimals are usually used for money
+ panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64))
+ }
+
+ d3Value := new(big.Int).Mul(d.value, d2.value)
+ return Decimal{
+ value: d3Value,
+ exp: int32(expInt64),
+ }
+}
+
+// Shift shifts the decimal in base 10.
+// It shifts left when shift is positive and right if shift is negative.
+// In simpler terms, the given value for shift is added to the exponent
+// of the decimal.
+func (d Decimal) Shift(shift int32) Decimal {
+ d.ensureInitialized()
+ return Decimal{
+ value: new(big.Int).Set(d.value),
+ exp: d.exp + shift,
+ }
+}
+
+// Div returns d / d2. If it doesn't divide exactly, the result will have
+// DivisionPrecision digits after the decimal point.
+func (d Decimal) Div(d2 Decimal) Decimal {
+ return d.DivRound(d2, int32(DivisionPrecision))
+}
+
+// QuoRem does divsion with remainder
+// d.QuoRem(d2,precision) returns quotient q and remainder r such that
+// d = d2 * q + r, q an integer multiple of 10^(-precision)
+// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0
+// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0
+// Note that precision<0 is allowed as input.
+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) {
+ d.ensureInitialized()
+ d2.ensureInitialized()
+ if d2.value.Sign() == 0 {
+ panic("decimal division by 0")
+ }
+ scale := -precision
+ e := int64(d.exp - d2.exp - scale)
+ if e > math.MaxInt32 || e < math.MinInt32 {
+ panic("overflow in decimal QuoRem")
+ }
+ var aa, bb, expo big.Int
+ var scalerest int32
+ // d = a 10^ea
+ // d2 = b 10^eb
+ if e < 0 {
+ aa = *d.value
+ expo.SetInt64(-e)
+ bb.Exp(tenInt, &expo, nil)
+ bb.Mul(d2.value, &bb)
+ scalerest = d.exp
+ // now aa = a
+ // bb = b 10^(scale + eb - ea)
+ } else {
+ expo.SetInt64(e)
+ aa.Exp(tenInt, &expo, nil)
+ aa.Mul(d.value, &aa)
+ bb = *d2.value
+ scalerest = scale + d2.exp
+ // now aa = a ^ (ea - eb - scale)
+ // bb = b
+ }
+ var q, r big.Int
+ q.QuoRem(&aa, &bb, &r)
+ dq := Decimal{value: &q, exp: scale}
+ dr := Decimal{value: &r, exp: scalerest}
+ return dq, dr
+}
+
+// DivRound divides and rounds to a given precision
+// i.e. to an integer multiple of 10^(-precision)
+// for a positive quotient digit 5 is rounded up, away from 0
+// if the quotient is negative then digit 5 is rounded down, away from 0
+// Note that precision<0 is allowed as input.
+func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal {
+ // QuoRem already checks initialization
+ q, r := d.QuoRem(d2, precision)
+ // the actual rounding decision is based on comparing r*10^precision and d2/2
+ // instead compare 2 r 10 ^precision and d2
+ var rv2 big.Int
+ rv2.Abs(r.value)
+ rv2.Lsh(&rv2, 1)
+ // now rv2 = abs(r.value) * 2
+ r2 := Decimal{value: &rv2, exp: r.exp + precision}
+ // r2 is now 2 * r * 10 ^ precision
+ var c = r2.Cmp(d2.Abs())
+
+ if c < 0 {
+ return q
+ }
+
+ if d.value.Sign()*d2.value.Sign() < 0 {
+ return q.Sub(New(1, -precision))
+ }
+
+ return q.Add(New(1, -precision))
+}
+
+// Mod returns d % d2.
+func (d Decimal) Mod(d2 Decimal) Decimal {
+ quo := d.Div(d2).Truncate(0)
+ return d.Sub(d2.Mul(quo))
+}
+
+// Pow returns d to the power d2
+func (d Decimal) Pow(d2 Decimal) Decimal {
+ var temp Decimal
+ if d2.IntPart() == 0 {
+ return NewFromFloat(1)
+ }
+ temp = d.Pow(d2.Div(NewFromFloat(2)))
+ if d2.IntPart()%2 == 0 {
+ return temp.Mul(temp)
+ }
+ if d2.IntPart() > 0 {
+ return temp.Mul(temp).Mul(d)
+ }
+ return temp.Mul(temp).Div(d)
+}
+
+// Cmp compares the numbers represented by d and d2 and returns:
+//
+// -1 if d < d2
+// 0 if d == d2
+// +1 if d > d2
+//
+func (d Decimal) Cmp(d2 Decimal) int {
+ d.ensureInitialized()
+ d2.ensureInitialized()
+
+ if d.exp == d2.exp {
+ return d.value.Cmp(d2.value)
+ }
+
+ rd, rd2 := RescalePair(d, d2)
+
+ return rd.value.Cmp(rd2.value)
+}
+
+// Equal returns whether the numbers represented by d and d2 are equal.
+func (d Decimal) Equal(d2 Decimal) bool {
+ return d.Cmp(d2) == 0
+}
+
+// Equals is deprecated, please use Equal method instead
+func (d Decimal) Equals(d2 Decimal) bool {
+ return d.Equal(d2)
+}
+
+// GreaterThan (GT) returns true when d is greater than d2.
+func (d Decimal) GreaterThan(d2 Decimal) bool {
+ return d.Cmp(d2) == 1
+}
+
+// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2.
+func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool {
+ cmp := d.Cmp(d2)
+ return cmp == 1 || cmp == 0
+}
+
+// LessThan (LT) returns true when d is less than d2.
+func (d Decimal) LessThan(d2 Decimal) bool {
+ return d.Cmp(d2) == -1
+}
+
+// LessThanOrEqual (LTE) returns true when d is less than or equal to d2.
+func (d Decimal) LessThanOrEqual(d2 Decimal) bool {
+ cmp := d.Cmp(d2)
+ return cmp == -1 || cmp == 0
+}
+
+// Sign returns:
+//
+// -1 if d < 0
+// 0 if d == 0
+// +1 if d > 0
+//
+func (d Decimal) Sign() int {
+ if d.value == nil {
+ return 0
+ }
+ return d.value.Sign()
+}
+
+// IsPositive return
+//
+// true if d > 0
+// false if d == 0
+// false if d < 0
+func (d Decimal) IsPositive() bool {
+ return d.Sign() == 1
+}
+
+// IsNegative return
+//
+// true if d < 0
+// false if d == 0
+// false if d > 0
+func (d Decimal) IsNegative() bool {
+ return d.Sign() == -1
+}
+
+// IsZero return
+//
+// true if d == 0
+// false if d > 0
+// false if d < 0
+func (d Decimal) IsZero() bool {
+ return d.Sign() == 0
+}
+
+// Exponent returns the exponent, or scale component of the decimal.
+func (d Decimal) Exponent() int32 {
+ return d.exp
+}
+
+// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent()
+func (d Decimal) Coefficient() *big.Int {
+ d.ensureInitialized()
+ // we copy the coefficient so that mutating the result does not mutate the
+ // Decimal.
+ return big.NewInt(0).Set(d.value)
+}
+
+// IntPart returns the integer component of the decimal.
+func (d Decimal) IntPart() int64 {
+ scaledD := d.rescale(0)
+ return scaledD.value.Int64()
+}
+
+// BigInt returns integer component of the decimal as a BigInt.
+func (d Decimal) BigInt() *big.Int {
+ scaledD := d.rescale(0)
+ i := &big.Int{}
+ i.SetString(scaledD.String(), 10)
+ return i
+}
+
+// BigFloat returns decimal as BigFloat.
+// Be aware that casting decimal to BigFloat might cause a loss of precision.
+func (d Decimal) BigFloat() *big.Float {
+ f := &big.Float{}
+ f.SetString(d.String())
+ return f
+}
+
+// Rat returns a rational number representation of the decimal.
+func (d Decimal) Rat() *big.Rat {
+ d.ensureInitialized()
+ if d.exp <= 0 {
+ // NOTE(vadim): must negate after casting to prevent int32 overflow
+ denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil)
+ return new(big.Rat).SetFrac(d.value, denom)
+ }
+
+ mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil)
+ num := new(big.Int).Mul(d.value, mul)
+ return new(big.Rat).SetFrac(num, oneInt)
+}
+
+// Float64 returns the nearest float64 value for d and a bool indicating
+// whether f represents d exactly.
+// For more details, see the documentation for big.Rat.Float64
+func (d Decimal) Float64() (f float64, exact bool) {
+ return d.Rat().Float64()
+}
+
+// String returns the string representation of the decimal
+// with the fixed point.
+//
+// Example:
+//
+// d := New(-12345, -3)
+// println(d.String())
+//
+// Output:
+//
+// -12.345
+//
+func (d Decimal) String() string {
+ return d.string(true)
+}
+
+// StringFixed returns a rounded fixed-point string with places digits after
+// the decimal point.
+//
+// Example:
+//
+// NewFromFloat(0).StringFixed(2) // output: "0.00"
+// NewFromFloat(0).StringFixed(0) // output: "0"
+// NewFromFloat(5.45).StringFixed(0) // output: "5"
+// NewFromFloat(5.45).StringFixed(1) // output: "5.5"
+// NewFromFloat(5.45).StringFixed(2) // output: "5.45"
+// NewFromFloat(5.45).StringFixed(3) // output: "5.450"
+// NewFromFloat(545).StringFixed(-1) // output: "550"
+//
+func (d Decimal) StringFixed(places int32) string {
+ rounded := d.Round(places)
+ return rounded.string(false)
+}
+
+// StringFixedBank returns a banker rounded fixed-point string with places digits
+// after the decimal point.
+//
+// Example:
+//
+// NewFromFloat(0).StringFixedBank(2) // output: "0.00"
+// NewFromFloat(0).StringFixedBank(0) // output: "0"
+// NewFromFloat(5.45).StringFixedBank(0) // output: "5"
+// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4"
+// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45"
+// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450"
+// NewFromFloat(545).StringFixedBank(-1) // output: "540"
+//
+func (d Decimal) StringFixedBank(places int32) string {
+ rounded := d.RoundBank(places)
+ return rounded.string(false)
+}
+
+// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For
+// more details see the documentation at function RoundCash.
+func (d Decimal) StringFixedCash(interval uint8) string {
+ rounded := d.RoundCash(interval)
+ return rounded.string(false)
+}
+
+// Round rounds the decimal to places decimal places.
+// If places < 0, it will round the integer part to the nearest 10^(-places).
+//
+// Example:
+//
+// NewFromFloat(5.45).Round(1).String() // output: "5.5"
+// NewFromFloat(545).Round(-1).String() // output: "550"
+//
+func (d Decimal) Round(places int32) Decimal {
+ // truncate to places + 1
+ ret := d.rescale(-places - 1)
+
+ // add sign(d) * 0.5
+ if ret.value.Sign() < 0 {
+ ret.value.Sub(ret.value, fiveInt)
+ } else {
+ ret.value.Add(ret.value, fiveInt)
+ }
+
+ // floor for positive numbers, ceil for negative numbers
+ _, m := ret.value.DivMod(ret.value, tenInt, new(big.Int))
+ ret.exp++
+ if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 {
+ ret.value.Add(ret.value, oneInt)
+ }
+
+ return ret
+}
+
+// RoundBank rounds the decimal to places decimal places.
+// If the final digit to round is equidistant from the nearest two integers the
+// rounded value is taken as the even number
+//
+// If places < 0, it will round the integer part to the nearest 10^(-places).
+//
+// Examples:
+//
+// NewFromFloat(5.45).Round(1).String() // output: "5.4"
+// NewFromFloat(545).Round(-1).String() // output: "540"
+// NewFromFloat(5.46).Round(1).String() // output: "5.5"
+// NewFromFloat(546).Round(-1).String() // output: "550"
+// NewFromFloat(5.55).Round(1).String() // output: "5.6"
+// NewFromFloat(555).Round(-1).String() // output: "560"
+//
+func (d Decimal) RoundBank(places int32) Decimal {
+
+ round := d.Round(places)
+ remainder := d.Sub(round).Abs()
+
+ half := New(5, -places-1)
+ if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 {
+ if round.value.Sign() < 0 {
+ round.value.Add(round.value, oneInt)
+ } else {
+ round.value.Sub(round.value, oneInt)
+ }
+ }
+
+ return round
+}
+
+// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific
+// interval. The amount payable for a cash transaction is rounded to the nearest
+// multiple of the minimum currency unit available. The following intervals are
+// available: 5, 10, 25, 50 and 100; any other number throws a panic.
+// 5: 5 cent rounding 3.43 => 3.45
+// 10: 10 cent rounding 3.45 => 3.50 (5 gets rounded up)
+// 25: 25 cent rounding 3.41 => 3.50
+// 50: 50 cent rounding 3.75 => 4.00
+// 100: 100 cent rounding 3.50 => 4.00
+// For more details: https://en.wikipedia.org/wiki/Cash_rounding
+func (d Decimal) RoundCash(interval uint8) Decimal {
+ var iVal *big.Int
+ switch interval {
+ case 5:
+ iVal = twentyInt
+ case 10:
+ iVal = tenInt
+ case 25:
+ iVal = fourInt
+ case 50:
+ iVal = twoInt
+ case 100:
+ iVal = oneInt
+ default:
+ panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100", interval))
+ }
+ dVal := Decimal{
+ value: iVal,
+ }
+
+ // TODO: optimize those calculations to reduce the high allocations (~29 allocs).
+ return d.Mul(dVal).Round(0).Div(dVal).Truncate(2)
+}
+
+// Floor returns the nearest integer value less than or equal to d.
+func (d Decimal) Floor() Decimal {
+ d.ensureInitialized()
+
+ if d.exp >= 0 {
+ return d
+ }
+
+ exp := big.NewInt(10)
+
+ // NOTE(vadim): must negate after casting to prevent int32 overflow
+ exp.Exp(exp, big.NewInt(-int64(d.exp)), nil)
+
+ z := new(big.Int).Div(d.value, exp)
+ return Decimal{value: z, exp: 0}
+}
+
+// Ceil returns the nearest integer value greater than or equal to d.
+func (d Decimal) Ceil() Decimal {
+ d.ensureInitialized()
+
+ if d.exp >= 0 {
+ return d
+ }
+
+ exp := big.NewInt(10)
+
+ // NOTE(vadim): must negate after casting to prevent int32 overflow
+ exp.Exp(exp, big.NewInt(-int64(d.exp)), nil)
+
+ z, m := new(big.Int).DivMod(d.value, exp, new(big.Int))
+ if m.Cmp(zeroInt) != 0 {
+ z.Add(z, oneInt)
+ }
+ return Decimal{value: z, exp: 0}
+}
+
+// Truncate truncates off digits from the number, without rounding.
+//
+// NOTE: precision is the last digit that will not be truncated (must be >= 0).
+//
+// Example:
+//
+// decimal.NewFromString("123.456").Truncate(2).String() // "123.45"
+//
+func (d Decimal) Truncate(precision int32) Decimal {
+ d.ensureInitialized()
+ if precision >= 0 && -precision > d.exp {
+ return d.rescale(-precision)
+ }
+ return d
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error {
+ if string(decimalBytes) == "null" {
+ return nil
+ }
+
+ str, err := unquoteIfQuoted(decimalBytes)
+ if err != nil {
+ return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err)
+ }
+
+ decimal, err := NewFromString(str)
+ *d = decimal
+ if err != nil {
+ return fmt.Errorf("error decoding string '%s': %s", str, err)
+ }
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Decimal) MarshalJSON() ([]byte, error) {
+ var str string
+ if MarshalJSONWithoutQuotes {
+ str = d.String()
+ } else {
+ str = "\"" + d.String() + "\""
+ }
+ return []byte(str), nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation
+// is already used when encoding to text, this method stores that string as []byte
+func (d *Decimal) UnmarshalBinary(data []byte) error {
+ // Extract the exponent
+ d.exp = int32(binary.BigEndian.Uint32(data[:4]))
+
+ // Extract the value
+ d.value = new(big.Int)
+ return d.value.GobDecode(data[4:])
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d Decimal) MarshalBinary() (data []byte, err error) {
+ // Write the exponent first since it's a fixed size
+ v1 := make([]byte, 4)
+ binary.BigEndian.PutUint32(v1, uint32(d.exp))
+
+ // Add the value
+ var v2 []byte
+ if v2, err = d.value.GobEncode(); err != nil {
+ return
+ }
+
+ // Return the byte array
+ data = append(v1, v2...)
+ return
+}
+
+// Scan implements the sql.Scanner interface for database deserialization.
+func (d *Decimal) Scan(value interface{}) error {
+ // first try to see if the data is stored in database as a Numeric datatype
+ switch v := value.(type) {
+
+ case float32:
+ *d = NewFromFloat(float64(v))
+ return nil
+
+ case float64:
+ // numeric in sqlite3 sends us float64
+ *d = NewFromFloat(v)
+ return nil
+
+ case int64:
+ // at least in sqlite3 when the value is 0 in db, the data is sent
+ // to us as an int64 instead of a float64 ...
+ *d = New(v, 0)
+ return nil
+
+ default:
+ // default is trying to interpret value stored as string
+ str, err := unquoteIfQuoted(v)
+ if err != nil {
+ return err
+ }
+ *d, err = NewFromString(str)
+ return err
+ }
+}
+
+// Value implements the driver.Valuer interface for database serialization.
+func (d Decimal) Value() (driver.Value, error) {
+ return d.String(), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for XML
+// deserialization.
+func (d *Decimal) UnmarshalText(text []byte) error {
+ str := string(text)
+
+ dec, err := NewFromString(str)
+ *d = dec
+ if err != nil {
+ return fmt.Errorf("error decoding string '%s': %s", str, err)
+ }
+
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for XML
+// serialization.
+func (d Decimal) MarshalText() (text []byte, err error) {
+ return []byte(d.String()), nil
+}
+
+// GobEncode implements the gob.GobEncoder interface for gob serialization.
+func (d Decimal) GobEncode() ([]byte, error) {
+ return d.MarshalBinary()
+}
+
+// GobDecode implements the gob.GobDecoder interface for gob serialization.
+func (d *Decimal) GobDecode(data []byte) error {
+ return d.UnmarshalBinary(data)
+}
+
+// StringScaled first scales the decimal then calls .String() on it.
+// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead.
+func (d Decimal) StringScaled(exp int32) string {
+ return d.rescale(exp).String()
+}
+
+func (d Decimal) string(trimTrailingZeros bool) string {
+ if d.exp >= 0 {
+ return d.rescale(0).value.String()
+ }
+
+ abs := new(big.Int).Abs(d.value)
+ str := abs.String()
+
+ var intPart, fractionalPart string
+
+ // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN
+ // and you are on a 32-bit machine. Won't fix this super-edge case.
+ dExpInt := int(d.exp)
+ if len(str) > -dExpInt {
+ intPart = str[:len(str)+dExpInt]
+ fractionalPart = str[len(str)+dExpInt:]
+ } else {
+ intPart = "0"
+
+ num0s := -dExpInt - len(str)
+ fractionalPart = strings.Repeat("0", num0s) + str
+ }
+
+ if trimTrailingZeros {
+ i := len(fractionalPart) - 1
+ for ; i >= 0; i-- {
+ if fractionalPart[i] != '0' {
+ break
+ }
+ }
+ fractionalPart = fractionalPart[:i+1]
+ }
+
+ number := intPart
+ if len(fractionalPart) > 0 {
+ number += "." + fractionalPart
+ }
+
+ if d.value.Sign() < 0 {
+ return "-" + number
+ }
+
+ return number
+}
+
+func (d *Decimal) ensureInitialized() {
+ if d.value == nil {
+ d.value = new(big.Int)
+ }
+}
+
+// Min returns the smallest Decimal that was passed in the arguments.
+//
+// To call this function with an array, you must do:
+//
+// Min(arr[0], arr[1:]...)
+//
+// This makes it harder to accidentally call Min with 0 arguments.
+func Min(first Decimal, rest ...Decimal) Decimal {
+ ans := first
+ for _, item := range rest {
+ if item.Cmp(ans) < 0 {
+ ans = item
+ }
+ }
+ return ans
+}
+
+// Max returns the largest Decimal that was passed in the arguments.
+//
+// To call this function with an array, you must do:
+//
+// Max(arr[0], arr[1:]...)
+//
+// This makes it harder to accidentally call Max with 0 arguments.
+func Max(first Decimal, rest ...Decimal) Decimal {
+ ans := first
+ for _, item := range rest {
+ if item.Cmp(ans) > 0 {
+ ans = item
+ }
+ }
+ return ans
+}
+
+// Sum returns the combined total of the provided first and rest Decimals
+func Sum(first Decimal, rest ...Decimal) Decimal {
+ total := first
+ for _, item := range rest {
+ total = total.Add(item)
+ }
+
+ return total
+}
+
+// Avg returns the average value of the provided first and rest Decimals
+func Avg(first Decimal, rest ...Decimal) Decimal {
+ count := New(int64(len(rest)+1), 0)
+ sum := Sum(first, rest...)
+ return sum.Div(count)
+}
+
+// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals)
+func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) {
+ d1.ensureInitialized()
+ d2.ensureInitialized()
+
+ if d1.exp == d2.exp {
+ return d1, d2
+ }
+
+ baseScale := min(d1.exp, d2.exp)
+ if baseScale != d1.exp {
+ return d1.rescale(baseScale), d2
+ }
+ return d1, d2.rescale(baseScale)
+}
+
+func min(x, y int32) int32 {
+ if x >= y {
+ return y
+ }
+ return x
+}
+
+func unquoteIfQuoted(value interface{}) (string, error) {
+ var bytes []byte
+
+ switch v := value.(type) {
+ case string:
+ bytes = []byte(v)
+ case []byte:
+ bytes = v
+ default:
+ return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'",
+ value, value)
+ }
+
+ // If the amount is quoted, strip the quotes
+ if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' {
+ bytes = bytes[1 : len(bytes)-1]
+ }
+ return string(bytes), nil
+}
+
+// NullDecimal represents a nullable decimal with compatibility for
+// scanning null values from the database.
+type NullDecimal struct {
+ Decimal Decimal
+ Valid bool
+}
+
+// Scan implements the sql.Scanner interface for database deserialization.
+func (d *NullDecimal) Scan(value interface{}) error {
+ if value == nil {
+ d.Valid = false
+ return nil
+ }
+ d.Valid = true
+ return d.Decimal.Scan(value)
+}
+
+// Value implements the driver.Valuer interface for database serialization.
+func (d NullDecimal) Value() (driver.Value, error) {
+ if !d.Valid {
+ return nil, nil
+ }
+ return d.Decimal.Value()
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error {
+ if string(decimalBytes) == "null" {
+ d.Valid = false
+ return nil
+ }
+ d.Valid = true
+ return d.Decimal.UnmarshalJSON(decimalBytes)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d NullDecimal) MarshalJSON() ([]byte, error) {
+ if !d.Valid {
+ return []byte("null"), nil
+ }
+ return d.Decimal.MarshalJSON()
+}
+
+// Trig functions
+
+// Atan returns the arctangent, in radians, of x.
+func (d Decimal) Atan() Decimal {
+ if d.Equal(NewFromFloat(0.0)) {
+ return d
+ }
+ if d.GreaterThan(NewFromFloat(0.0)) {
+ return d.satan()
+ }
+ return d.Neg().satan().Neg()
+}
+
+func (d Decimal) xatan() Decimal {
+ P0 := NewFromFloat(-8.750608600031904122785e-01)
+ P1 := NewFromFloat(-1.615753718733365076637e+01)
+ P2 := NewFromFloat(-7.500855792314704667340e+01)
+ P3 := NewFromFloat(-1.228866684490136173410e+02)
+ P4 := NewFromFloat(-6.485021904942025371773e+01)
+ Q0 := NewFromFloat(2.485846490142306297962e+01)
+ Q1 := NewFromFloat(1.650270098316988542046e+02)
+ Q2 := NewFromFloat(4.328810604912902668951e+02)
+ Q3 := NewFromFloat(4.853903996359136964868e+02)
+ Q4 := NewFromFloat(1.945506571482613964425e+02)
+ z := d.Mul(d)
+ b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z)
+ b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4)
+ z = b1.Div(b2)
+ z = d.Mul(z).Add(d)
+ return z
+}
+
+// satan reduces its argument (known to be positive)
+// to the range [0, 0.66] and calls xatan.
+func (d Decimal) satan() Decimal {
+ Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits
+ Tan3pio8 := NewFromFloat(2.41421356237309504880) // tan(3*pi/8)
+ pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459)
+
+ if d.LessThanOrEqual(NewFromFloat(0.66)) {
+ return d.xatan()
+ }
+ if d.GreaterThan(Tan3pio8) {
+ return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits)
+ }
+ return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits))
+}
+
+// sin coefficients
+var _sin = [...]Decimal{
+ NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd
+ NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d
+ NewFromFloat(2.75573136213857245213e-6), // 0x3ec71de3567d48a1
+ NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03
+ NewFromFloat(8.33333333332211858878e-3), // 0x3f8111111110f7d0
+ NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548
+}
+
+// Sin returns the sine of the radian argument x.
+func (d Decimal) Sin() Decimal {
+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000,
+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170,
+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi
+
+ if d.Equal(NewFromFloat(0.0)) {
+ return d
+ }
+ // make argument positive but save the sign
+ sign := false
+ if d.LessThan(NewFromFloat(0.0)) {
+ d = d.Neg()
+ sign = true
+ }
+
+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float
+
+ // map zeros to origin
+ if j&1 == 1 {
+ j++
+ y = y.Add(NewFromFloat(1.0))
+ }
+ j &= 7 // octant modulo 2Pi radians (360 degrees)
+ // reflect in x axis
+ if j > 3 {
+ sign = !sign
+ j -= 4
+ }
+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic
+ zz := z.Mul(z)
+
+ if j == 1 || j == 2 {
+ w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5]))
+ y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w)
+ } else {
+ y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5])))
+ }
+ if sign {
+ y = y.Neg()
+ }
+ return y
+}
+
+// cos coefficients
+var _cos = [...]Decimal{
+ NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b
+ NewFromFloat(2.08757008419747316778e-9), // 0x3e21ee9d7b4e3f05
+ NewFromFloat(-2.75573141792967388112e-7), // 0xbe927e4f7eac4bc6
+ NewFromFloat(2.48015872888517045348e-5), // 0x3efa01a019c844f5
+ NewFromFloat(-1.38888888888730564116e-3), // 0xbf56c16c16c14f91
+ NewFromFloat(4.16666666666665929218e-2), // 0x3fa555555555554b
+}
+
+// Cos returns the cosine of the radian argument x.
+func (d Decimal) Cos() Decimal {
+
+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000,
+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170,
+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi
+
+ // make argument positive
+ sign := false
+ if d.LessThan(NewFromFloat(0.0)) {
+ d = d.Neg()
+ }
+
+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float
+
+ // map zeros to origin
+ if j&1 == 1 {
+ j++
+ y = y.Add(NewFromFloat(1.0))
+ }
+ j &= 7 // octant modulo 2Pi radians (360 degrees)
+ // reflect in x axis
+ if j > 3 {
+ sign = !sign
+ j -= 4
+ }
+ if j > 1 {
+ sign = !sign
+ }
+
+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic
+ zz := z.Mul(z)
+
+ if j == 1 || j == 2 {
+ y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5])))
+ } else {
+ w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5]))
+ y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w)
+ }
+ if sign {
+ y = y.Neg()
+ }
+ return y
+}
+
+var _tanP = [...]Decimal{
+ NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38
+ NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd
+ NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176
+}
+var _tanQ = [...]Decimal{
+ NewFromFloat(1.00000000000000000000e+0),
+ NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572
+ NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96
+ NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef
+ NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31
+}
+
+// Tan returns the tangent of the radian argument x.
+func (d Decimal) Tan() Decimal {
+
+ PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts
+ PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000,
+ PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170,
+ M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi
+
+ if d.Equal(NewFromFloat(0.0)) {
+ return d
+ }
+
+ // make argument positive but save the sign
+ sign := false
+ if d.LessThan(NewFromFloat(0.0)) {
+ d = d.Neg()
+ sign = true
+ }
+
+ j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle
+ y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float
+
+ // map zeros to origin
+ if j&1 == 1 {
+ j++
+ y = y.Add(NewFromFloat(1.0))
+ }
+
+ z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic
+ zz := z.Mul(z)
+
+ if zz.GreaterThan(NewFromFloat(1e-14)) {
+ w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2]))
+ x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4])
+ y = z.Add(z.Mul(w.Div(x)))
+ } else {
+ y = z
+ }
+ if j&2 == 2 {
+ y = NewFromFloat(-1.0).Div(y)
+ }
+ if sign {
+ y = y.Neg()
+ }
+ return y
+}
diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go
new file mode 100644
index 000000000..8008f55cb
--- /dev/null
+++ b/vendor/github.com/shopspring/decimal/rounding.go
@@ -0,0 +1,119 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Multiprecision decimal numbers.
+// For floating-point formatting only; not general purpose.
+// Only operations are assign and (binary) left/right shift.
+// Can do binary floating point in multiprecision decimal precisely
+// because 2 divides 10; cannot do decimal floating point
+// in multiprecision binary precisely.
+
+package decimal
+
+type floatInfo struct {
+ mantbits uint
+ expbits uint
+ bias int
+}
+
+var float32info = floatInfo{23, 8, -127}
+var float64info = floatInfo{52, 11, -1023}
+
+// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits
+// that will let the original floating point value be precisely reconstructed.
+func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
+ // If mantissa is zero, the number is zero; stop now.
+ if mant == 0 {
+ d.nd = 0
+ return
+ }
+
+ // Compute upper and lower such that any decimal number
+ // between upper and lower (possibly inclusive)
+ // will round to the original floating point number.
+
+ // We may see at once that the number is already shortest.
+ //
+ // Suppose d is not denormal, so that 2^exp <= d < 10^dp.
+ // The closest shorter number is at least 10^(dp-nd) away.
+ // The lower/upper bounds computed below are at distance
+ // at most 2^(exp-mantbits).
+ //
+ // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
+ // or equivalently log2(10)*(dp-nd) > exp-mantbits.
+ // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
+ minexp := flt.bias + 1 // minimum possible exponent
+ if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
+ // The number is already shortest.
+ return
+ }
+
+ // d = mant << (exp - mantbits)
+ // Next highest floating point number is mant+1 << exp-mantbits.
+ // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
+ upper := new(decimal)
+ upper.Assign(mant*2 + 1)
+ upper.Shift(exp - int(flt.mantbits) - 1)
+
+ // d = mant << (exp - mantbits)
+ // Next lowest floating point number is mant-1 << exp-mantbits,
+ // unless mant-1 drops the significant bit and exp is not the minimum exp,
+ // in which case the next lowest is mant*2-1 << exp-mantbits-1.
+ // Either way, call it mantlo << explo-mantbits.
+ // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
+ var mantlo uint64
+ var explo int
+ if mant > 1<.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+ v, _ := ToBoolE(i)
+ return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+ v, _ := ToTimeE(i)
+ return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+ v, _ := ToDurationE(i)
+ return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+ v, _ := ToFloat64E(i)
+ return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+ v, _ := ToInt64E(i)
+ return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+ v, _ := ToIntE(i)
+ return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+ v, _ := ToStringE(i)
+ return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+ v, _ := ToStringMapStringE(i)
+ return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+ v, _ := ToStringMapStringSliceE(i)
+ return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+ v, _ := ToStringMapBoolE(i)
+ return v
+}
+
+// ToStringMapInt casts an interface to a map[string]int type.
+func ToStringMapInt(i interface{}) map[string]int {
+ v, _ := ToStringMapIntE(i)
+ return v
+}
+
+// ToStringMapInt64 casts an interface to a map[string]int64 type.
+func ToStringMapInt64(i interface{}) map[string]int64 {
+ v, _ := ToStringMapInt64E(i)
+ return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+ v, _ := ToStringMapE(i)
+ return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+ v, _ := ToSliceE(i)
+ return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+ v, _ := ToBoolSliceE(i)
+ return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+ v, _ := ToStringSliceE(i)
+ return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+ v, _ := ToIntSliceE(i)
+ return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 000000000..70c7291be
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1249 @@
+// Copyright © 2014 Steve Francia .
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "html/template"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+ i = indirect(i)
+
+ switch v := i.(type) {
+ case time.Time:
+ return v, nil
+ case string:
+ return StringToDate(v)
+ case int:
+ return time.Unix(int64(v), 0), nil
+ case int64:
+ return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
+ default:
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+ }
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case time.Duration:
+ return s, nil
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+ d = time.Duration(ToInt64(s))
+ return
+ case float32, float64:
+ d = time.Duration(ToFloat64(s))
+ return
+ case string:
+ if strings.ContainsAny(s, "nsuµmh") {
+ d, err = time.ParseDuration(s)
+ } else {
+ d, err = time.ParseDuration(s + "ns")
+ }
+ return
+ default:
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+ return
+ }
+}
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+ i = indirect(i)
+
+ switch b := i.(type) {
+ case bool:
+ return b, nil
+ case nil:
+ return false, nil
+ case int:
+ if i.(int) != 0 {
+ return true, nil
+ }
+ return false, nil
+ case string:
+ return strconv.ParseBool(i.(string))
+ default:
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+ }
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return s, nil
+ case float32:
+ return float64(s), nil
+ case int:
+ return float64(s), nil
+ case int64:
+ return float64(s), nil
+ case int32:
+ return float64(s), nil
+ case int16:
+ return float64(s), nil
+ case int8:
+ return float64(s), nil
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
+ return float64(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ }
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case float64:
+ return float32(s), nil
+ case float32:
+ return s, nil
+ case int:
+ return float32(s), nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int64(s), nil
+ case int64:
+ return s, nil
+ case int32:
+ return int64(s), nil
+ case int16:
+ return int64(s), nil
+ case int8:
+ return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int32(s), nil
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
+ case float64:
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int16(s), nil
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+ }
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int8(s), nil
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return s, nil
+ case int64:
+ return int(s), nil
+ case int32:
+ return int(s), nil
+ case int16:
+ return int(s), nil
+ case int8:
+ return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 0)
+ if err == nil {
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+ }
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+ if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+ // Avoid creating a reflect.Value if it's not a pointer.
+ return a
+ }
+ v := reflect.ValueOf(a)
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error,
+func indirectToStringerOrError(a interface{}) interface{} {
+ if a == nil {
+ return nil
+ }
+
+ var errorType = reflect.TypeOf((*error)(nil)).Elem()
+ var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+ v := reflect.ValueOf(a)
+ for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+ i = indirectToStringerOrError(i)
+
+ switch s := i.(type) {
+ case string:
+ return s, nil
+ case bool:
+ return strconv.FormatBool(s), nil
+ case float64:
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+ case int:
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint64:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint32:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint16:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case uint8:
+ return strconv.FormatUint(uint64(s), 10), nil
+ case []byte:
+ return string(s), nil
+ case template.HTML:
+ return string(s), nil
+ case template.URL:
+ return string(s), nil
+ case template.JS:
+ return string(s), nil
+ case template.CSS:
+ return string(s), nil
+ case template.HTMLAttr:
+ return string(s), nil
+ case nil:
+ return "", nil
+ case fmt.Stringer:
+ return s.String(), nil
+ case error:
+ return s.Error(), nil
+ default:
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+ }
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+ var m = map[string]string{}
+
+ switch v := i.(type) {
+ case map[string]string:
+ return v, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToString(val)
+ }
+ return m, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+ }
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+ var m = map[string][]string{}
+
+ switch v := i.(type) {
+ case map[string][]string:
+ return v, nil
+ case map[string][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[string]string:
+ for k, val := range v {
+ m[ToString(k)] = []string{val}
+ }
+ case map[string]interface{}:
+ for k, val := range v {
+ switch vt := val.(type) {
+ case []interface{}:
+ m[ToString(k)] = ToStringSlice(vt)
+ case []string:
+ m[ToString(k)] = vt
+ default:
+ m[ToString(k)] = []string{ToString(val)}
+ }
+ }
+ return m, nil
+ case map[interface{}][]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]string:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}][]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToStringSlice(val)
+ }
+ return m, nil
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ key, err := ToStringE(k)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ value, err := ToStringSliceE(val)
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ m[key] = value
+ }
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+ }
+ return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+ var m = map[string]bool{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToBool(val)
+ }
+ return m, nil
+ case map[string]bool:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+ }
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+ var m = map[string]interface{}{}
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = val
+ }
+ return m, nil
+ case map[string]interface{}:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ default:
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+ }
+}
+
+// ToStringMapIntE casts an interface to a map[string]int{} type.
+func ToStringMapIntE(i interface{}) (map[string]int, error) {
+ var m = map[string]int{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt(val)
+ }
+ return m, nil
+ case map[string]int:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToIntE(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToStringMapInt64E casts an interface to a map[string]int64{} type.
+func ToStringMapInt64E(i interface{}) (map[string]int64, error) {
+ var m = map[string]int64{}
+ if i == nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+
+ switch v := i.(type) {
+ case map[interface{}]interface{}:
+ for k, val := range v {
+ m[ToString(k)] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]interface{}:
+ for k, val := range v {
+ m[k] = ToInt64(val)
+ }
+ return m, nil
+ case map[string]int64:
+ return v, nil
+ case string:
+ err := jsonStringToObject(v, &m)
+ return m, err
+ }
+
+ if reflect.TypeOf(i).Kind() != reflect.Map {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal := reflect.ValueOf(m)
+ v := reflect.ValueOf(i)
+ for _, keyVal := range v.MapKeys() {
+ val, err := ToInt64E(v.MapIndex(keyVal).Interface())
+ if err != nil {
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]int64", i, i)
+ }
+ mVal.SetMapIndex(keyVal, reflect.ValueOf(val))
+ }
+ return m, nil
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+ var s []interface{}
+
+ switch v := i.(type) {
+ case []interface{}:
+ return append(s, v...), nil
+ case []map[string]interface{}:
+ for _, u := range v {
+ s = append(s, u)
+ }
+ return s, nil
+ default:
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+ }
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+ if i == nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+
+ switch v := i.(type) {
+ case []bool:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]bool, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToBoolE(s.Index(j).Interface())
+ if err != nil {
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+ }
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+ var a []string
+
+ switch v := i.(type) {
+ case []interface{}:
+ for _, u := range v {
+ a = append(a, ToString(u))
+ }
+ return a, nil
+ case []string:
+ return v, nil
+ case string:
+ return strings.Fields(v), nil
+ case interface{}:
+ str, err := ToStringE(v)
+ if err != nil {
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+ return []string{str}, nil
+ default:
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+ }
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+ if i == nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+
+ switch v := i.(type) {
+ case []int:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]int, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToIntE(s.Index(j).Interface())
+ if err != nil {
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+ }
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+ if i == nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+
+ switch v := i.(type) {
+ case []time.Duration:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]time.Duration, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToDurationE(s.Index(j).Interface())
+ if err != nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+ return parseDateWith(s, []string{
+ time.RFC3339,
+ "2006-01-02T15:04:05", // iso8601 without timezone
+ time.RFC1123Z,
+ time.RFC1123,
+ time.RFC822Z,
+ time.RFC822,
+ time.RFC850,
+ time.ANSIC,
+ time.UnixDate,
+ time.RubyDate,
+ "2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
+ "2006-01-02",
+ "02 Jan 2006",
+ "2006-01-02T15:04:05-0700", // RFC3339 without timezone hh:mm colon
+ "2006-01-02 15:04:05 -07:00",
+ "2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05Z07:00", // RFC3339 without T
+ "2006-01-02 15:04:05Z0700", // RFC3339 without T or timezone hh:mm colon
+ "2006-01-02 15:04:05",
+ time.Kitchen,
+ time.Stamp,
+ time.StampMilli,
+ time.StampMicro,
+ time.StampNano,
+ })
+}
+
+func parseDateWith(s string, dates []string) (d time.Time, e error) {
+ for _, dateType := range dates {
+ if d, e = time.Parse(dateType, s); e == nil {
+ return
+ }
+ }
+ return d, fmt.Errorf("unable to parse date: %s", s)
+}
+
+// jsonStringToObject attempts to unmarshall a string as JSON into
+// the object passed as pointer.
+func jsonStringToObject(s string, v interface{}) error {
+ data := []byte(s)
+ return json.Unmarshal(data, v)
+}
diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 000000000..fc3116090
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+ n := bcEncoding.EncodedLen(len(src))
+ dst := make([]byte, n)
+ bcEncoding.Encode(dst, src)
+ for dst[n-1] == '=' {
+ n--
+ }
+ return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
+ numOfEquals := 4 - (len(src) % 4)
+ for i := 0; i < numOfEquals; i++ {
+ src = append(src, '=')
+ }
+
+ dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+ n, err := bcEncoding.Decode(dst, src)
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 000000000..5577c0f93
--- /dev/null
+++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,304 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/blowfish"
+)
+
+const (
+ MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
+ MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+ DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost)
+}
+
+const (
+ majorVersion = '2'
+ minorVersion = 'a'
+ maxSaltSize = 16
+ maxCryptedHashSize = 23
+ encodedSaltSize = 22
+ encodedHashSize = 31
+ minHashSize = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+ 0x4f, 0x72, 0x70, 0x68,
+ 0x65, 0x61, 0x6e, 0x42,
+ 0x65, 0x68, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x53,
+ 0x63, 0x72, 0x79, 0x44,
+ 0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+ hash []byte
+ salt []byte
+ cost int // allowed range is MinCost to MaxCost
+ major byte
+ minor byte
+}
+
+// ErrPasswordTooLong is returned when the password passed to
+// GenerateFromPassword is too long (i.e. > 72 bytes).
+var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes")
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+// GenerateFromPassword does not accept passwords longer than 72 bytes, which
+// is the longest password bcrypt will operate on.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+ if len(password) > 72 {
+ return nil, ErrPasswordTooLong
+ }
+ p, err := newFromPassword(password, cost)
+ if err != nil {
+ return nil, err
+ }
+ return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return err
+ }
+
+ otherHash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return err
+ }
+
+ otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+ if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+ return nil
+ }
+
+ return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return 0, err
+ }
+ return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+ if cost < MinCost {
+ cost = DefaultCost
+ }
+ p := new(hashed)
+ p.major = majorVersion
+ p.minor = minorVersion
+
+ err := checkCost(cost)
+ if err != nil {
+ return nil, err
+ }
+ p.cost = cost
+
+ unencodedSalt := make([]byte, maxSaltSize)
+ _, err = io.ReadFull(rand.Reader, unencodedSalt)
+ if err != nil {
+ return nil, err
+ }
+
+ p.salt = base64Encode(unencodedSalt)
+ hash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return nil, err
+ }
+ p.hash = hash
+ return p, err
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+ if len(hashedSecret) < minHashSize {
+ return nil, ErrHashTooShort
+ }
+ p := new(hashed)
+ n, err := p.decodeVersion(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+ n, err = p.decodeCost(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+
+ // The "+2" is here because we'll have to append at most 2 '=' to the salt
+ // when base64 decoding it in expensiveBlowfishSetup().
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+ copy(p.salt, hashedSecret[:encodedSaltSize])
+
+ hashedSecret = hashedSecret[encodedSaltSize:]
+ p.hash = make([]byte, len(hashedSecret))
+ copy(p.hash, hashedSecret)
+
+ return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+ cipherData := make([]byte, len(magicCipherData))
+ copy(cipherData, magicCipherData)
+
+ c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < 24; i += 8 {
+ for j := 0; j < 64; j++ {
+ c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+ }
+ }
+
+ // Bug compatibility with C bcrypt implementations. We only encode 23 of
+ // the 24 bytes encrypted.
+ hsh := base64Encode(cipherData[:maxCryptedHashSize])
+ return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+ csalt, err := base64Decode(salt)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bug compatibility with C bcrypt implementations. They use the trailing
+ // NULL in the key string during expansion.
+ // We copy the key to prevent changing the underlying array.
+ ckey := append(key[:len(key):len(key)], 0)
+
+ c, err := blowfish.NewSaltedCipher(ckey, csalt)
+ if err != nil {
+ return nil, err
+ }
+
+ var i, rounds uint64
+ rounds = 1 << cost
+ for i = 0; i < rounds; i++ {
+ blowfish.ExpandKey(ckey, c)
+ blowfish.ExpandKey(csalt, c)
+ }
+
+ return c, nil
+}
+
+func (p *hashed) Hash() []byte {
+ arr := make([]byte, 60)
+ arr[0] = '$'
+ arr[1] = p.major
+ n := 2
+ if p.minor != 0 {
+ arr[2] = p.minor
+ n = 3
+ }
+ arr[n] = '$'
+ n++
+ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+ n += 2
+ arr[n] = '$'
+ n++
+ copy(arr[n:], p.salt)
+ n += encodedSaltSize
+ copy(arr[n:], p.hash)
+ n += encodedHashSize
+ return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+ if sbytes[0] != '$' {
+ return -1, InvalidHashPrefixError(sbytes[0])
+ }
+ if sbytes[1] > majorVersion {
+ return -1, HashVersionTooNewError(sbytes[1])
+ }
+ p.major = sbytes[1]
+ n := 3
+ if sbytes[2] != '$' {
+ p.minor = sbytes[2]
+ n++
+ }
+ return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+ cost, err := strconv.Atoi(string(sbytes[0:2]))
+ if err != nil {
+ return -1, err
+ }
+ err = checkCost(cost)
+ if err != nil {
+ return -1, err
+ }
+ p.cost = cost
+ return 3, nil
+}
+
+func (p *hashed) String() string {
+ return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+ if cost < MinCost || cost > MaxCost {
+ return InvalidCostError(cost)
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 000000000..904b57e01
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := prf.Size()
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := 1; block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint(i))
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[len(dk)-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := 2; n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go
new file mode 100644
index 000000000..c971a99fa
--- /dev/null
+++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go
@@ -0,0 +1,212 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scrypt implements the scrypt key derivation function as defined in
+// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
+// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf).
+package scrypt // import "golang.org/x/crypto/scrypt"
+
+import (
+ "crypto/sha256"
+ "encoding/binary"
+ "errors"
+ "math/bits"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// blockCopy copies n numbers from src into dst.
+func blockCopy(dst, src []uint32, n int) {
+ copy(dst, src[:n])
+}
+
+// blockXOR XORs numbers from dst with n numbers from src.
+func blockXOR(dst, src []uint32, n int) {
+ for i, v := range src[:n] {
+ dst[i] ^= v
+ }
+}
+
+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
+func salsaXOR(tmp *[16]uint32, in, out []uint32) {
+ w0 := tmp[0] ^ in[0]
+ w1 := tmp[1] ^ in[1]
+ w2 := tmp[2] ^ in[2]
+ w3 := tmp[3] ^ in[3]
+ w4 := tmp[4] ^ in[4]
+ w5 := tmp[5] ^ in[5]
+ w6 := tmp[6] ^ in[6]
+ w7 := tmp[7] ^ in[7]
+ w8 := tmp[8] ^ in[8]
+ w9 := tmp[9] ^ in[9]
+ w10 := tmp[10] ^ in[10]
+ w11 := tmp[11] ^ in[11]
+ w12 := tmp[12] ^ in[12]
+ w13 := tmp[13] ^ in[13]
+ w14 := tmp[14] ^ in[14]
+ w15 := tmp[15] ^ in[15]
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
+ x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
+
+ for i := 0; i < 8; i += 2 {
+ x4 ^= bits.RotateLeft32(x0+x12, 7)
+ x8 ^= bits.RotateLeft32(x4+x0, 9)
+ x12 ^= bits.RotateLeft32(x8+x4, 13)
+ x0 ^= bits.RotateLeft32(x12+x8, 18)
+
+ x9 ^= bits.RotateLeft32(x5+x1, 7)
+ x13 ^= bits.RotateLeft32(x9+x5, 9)
+ x1 ^= bits.RotateLeft32(x13+x9, 13)
+ x5 ^= bits.RotateLeft32(x1+x13, 18)
+
+ x14 ^= bits.RotateLeft32(x10+x6, 7)
+ x2 ^= bits.RotateLeft32(x14+x10, 9)
+ x6 ^= bits.RotateLeft32(x2+x14, 13)
+ x10 ^= bits.RotateLeft32(x6+x2, 18)
+
+ x3 ^= bits.RotateLeft32(x15+x11, 7)
+ x7 ^= bits.RotateLeft32(x3+x15, 9)
+ x11 ^= bits.RotateLeft32(x7+x3, 13)
+ x15 ^= bits.RotateLeft32(x11+x7, 18)
+
+ x1 ^= bits.RotateLeft32(x0+x3, 7)
+ x2 ^= bits.RotateLeft32(x1+x0, 9)
+ x3 ^= bits.RotateLeft32(x2+x1, 13)
+ x0 ^= bits.RotateLeft32(x3+x2, 18)
+
+ x6 ^= bits.RotateLeft32(x5+x4, 7)
+ x7 ^= bits.RotateLeft32(x6+x5, 9)
+ x4 ^= bits.RotateLeft32(x7+x6, 13)
+ x5 ^= bits.RotateLeft32(x4+x7, 18)
+
+ x11 ^= bits.RotateLeft32(x10+x9, 7)
+ x8 ^= bits.RotateLeft32(x11+x10, 9)
+ x9 ^= bits.RotateLeft32(x8+x11, 13)
+ x10 ^= bits.RotateLeft32(x9+x8, 18)
+
+ x12 ^= bits.RotateLeft32(x15+x14, 7)
+ x13 ^= bits.RotateLeft32(x12+x15, 9)
+ x14 ^= bits.RotateLeft32(x13+x12, 13)
+ x15 ^= bits.RotateLeft32(x14+x13, 18)
+ }
+ x0 += w0
+ x1 += w1
+ x2 += w2
+ x3 += w3
+ x4 += w4
+ x5 += w5
+ x6 += w6
+ x7 += w7
+ x8 += w8
+ x9 += w9
+ x10 += w10
+ x11 += w11
+ x12 += w12
+ x13 += w13
+ x14 += w14
+ x15 += w15
+
+ out[0], tmp[0] = x0, x0
+ out[1], tmp[1] = x1, x1
+ out[2], tmp[2] = x2, x2
+ out[3], tmp[3] = x3, x3
+ out[4], tmp[4] = x4, x4
+ out[5], tmp[5] = x5, x5
+ out[6], tmp[6] = x6, x6
+ out[7], tmp[7] = x7, x7
+ out[8], tmp[8] = x8, x8
+ out[9], tmp[9] = x9, x9
+ out[10], tmp[10] = x10, x10
+ out[11], tmp[11] = x11, x11
+ out[12], tmp[12] = x12, x12
+ out[13], tmp[13] = x13, x13
+ out[14], tmp[14] = x14, x14
+ out[15], tmp[15] = x15, x15
+}
+
+func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
+ blockCopy(tmp[:], in[(2*r-1)*16:], 16)
+ for i := 0; i < 2*r; i += 2 {
+ salsaXOR(tmp, in[i*16:], out[i*8:])
+ salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
+ }
+}
+
+func integer(b []uint32, r int) uint64 {
+ j := (2*r - 1) * 16
+ return uint64(b[j]) | uint64(b[j+1])<<32
+}
+
+func smix(b []byte, r, N int, v, xy []uint32) {
+ var tmp [16]uint32
+ R := 32 * r
+ x := xy
+ y := xy[R:]
+
+ j := 0
+ for i := 0; i < R; i++ {
+ x[i] = binary.LittleEndian.Uint32(b[j:])
+ j += 4
+ }
+ for i := 0; i < N; i += 2 {
+ blockCopy(v[i*R:], x, R)
+ blockMix(&tmp, x, y, r)
+
+ blockCopy(v[(i+1)*R:], y, R)
+ blockMix(&tmp, y, x, r)
+ }
+ for i := 0; i < N; i += 2 {
+ j := int(integer(x, r) & uint64(N-1))
+ blockXOR(x, v[j*R:], R)
+ blockMix(&tmp, x, y, r)
+
+ j = int(integer(y, r) & uint64(N-1))
+ blockXOR(y, v[j*R:], R)
+ blockMix(&tmp, y, x, r)
+ }
+ j = 0
+ for _, v := range x[:R] {
+ binary.LittleEndian.PutUint32(b[j:], v)
+ j += 4
+ }
+}
+
+// Key derives a key from the password, salt, and cost parameters, returning
+// a byte slice of length keyLen that can be used as cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
+//
+// For example, you can get a derived key for e.g. AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
+// and p=1. The parameters N, r, and p should be increased as memory latency and
+// CPU parallelism increases; consider setting N to the highest power of 2 you
+// can derive within 100 milliseconds. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+ if N <= 1 || N&(N-1) != 0 {
+ return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+ }
+ if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+ return nil, errors.New("scrypt: parameters are too large")
+ }
+
+ xy := make([]uint32, 64*r)
+ v := make([]uint32, 32*N*r)
+ b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+ for i := 0; i < p; i++ {
+ smix(b[i*128*r:], r, N, v, xy)
+ }
+
+ return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go
index fd45fe529..3a5e776f8 100644
--- a/vendor/golang.org/x/sys/unix/mremap.go
+++ b/vendor/golang.org/x/sys/unix/mremap.go
@@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
return mapper.Mremap(oldData, newLength, flags)
}
+
+func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) {
+ xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr))
+ return unsafe.Pointer(xaddr), err
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 59542a897..4cc7b0059 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
}
}
+//sys pthread_chdir_np(path string) (err error)
+
+func PthreadChdir(path string) (err error) {
+ return pthread_chdir_np(path)
+}
+
+//sys pthread_fchdir_np(fd int) (err error)
+
+func PthreadFchdir(fd int) (err error) {
+ return pthread_fchdir_np(fd)
+}
+
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 77081de8c..4e92e5aa4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+ xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+ return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+ return mapper.munmap(uintptr(addr), length)
+}
+
func Read(fd int, p []byte) (n int, err error) {
n, err = read(fd, p)
if raceenabled {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index ccb02f240..07642c308 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func pthread_chdir_np(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_chdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pthread_fchdir_np(fd int) (err error) {
+ _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_fchdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 8b8bb2840..923e08cb7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_chdir_np(SB)
+GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB)
+
+TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_fchdir_np(SB)
+GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 1b40b997b..7d73dda64 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func pthread_chdir_np(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_chdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pthread_fchdir_np(fd int) (err error) {
+ _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_fchdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 08362c1ab..057700111 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_chdir_np(SB)
+GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB)
+
+TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_fchdir_np(SB)
+GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 6f7d2ac70..97651b5bd 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -894,7 +894,7 @@ type ACL struct {
aclRevision byte
sbz1 byte
aclSize uint16
- aceCount uint16
+ AceCount uint16
sbz2 uint16
}
@@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct {
Trustee TRUSTEE
}
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+type ACE_HEADER struct {
+ AceType uint8
+ AceFlags uint8
+ AceSize uint16
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace
+type ACCESS_ALLOWED_ACE struct {
+ Header ACE_HEADER
+ Mask ACCESS_MASK
+ SidStart uint32
+}
+
+const (
+ // Constants for AceType
+ // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+ ACCESS_ALLOWED_ACE_TYPE = 0
+ ACCESS_DENIED_ACE_TYPE = 1
+)
+
// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions.
type TrusteeValue uintptr
@@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct {
//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD
//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW
+//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce
// Control returns the security descriptor control bits.
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 9f73df75b..eba761018 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -91,6 +91,7 @@ var (
procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW")
procEqualSid = modadvapi32.NewProc("EqualSid")
procFreeSid = modadvapi32.NewProc("FreeSid")
+ procGetAce = modadvapi32.NewProc("GetAce")
procGetLengthSid = modadvapi32.NewProc("GetLengthSid")
procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW")
procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl")
@@ -1224,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
return
}
+func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) {
+ r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ if r0 == 0 {
+ ret = GetLastError()
+ }
+ return
+}
+
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5af807025..56c008a69 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,15 @@
# dario.cat/mergo v1.0.0
## explicit; go 1.13
dario.cat/mergo
+# github.com/Masterminds/goutils v1.1.1
+## explicit
+github.com/Masterminds/goutils
+# github.com/Masterminds/semver/v3 v3.2.0
+## explicit; go 1.18
+github.com/Masterminds/semver/v3
+# github.com/Masterminds/sprig/v3 v3.2.3
+## explicit; go 1.13
+github.com/Masterminds/sprig/v3
# github.com/Microsoft/go-winio v0.6.1
## explicit; go 1.17
github.com/Microsoft/go-winio
@@ -91,9 +100,6 @@ github.com/evanphx/json-patch/v5/internal/json
# github.com/felixge/httpsnoop v1.0.4
## explicit; go 1.13
github.com/felixge/httpsnoop
-# github.com/flosch/pongo2/v6 v6.0.0
-## explicit; go 1.18
-github.com/flosch/pongo2/v6
# github.com/fsnotify/fsnotify v1.7.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
@@ -251,6 +257,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
+# github.com/huandu/xstrings v1.3.3
+## explicit; go 1.12
+github.com/huandu/xstrings
# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
@@ -280,6 +289,12 @@ github.com/mailru/easyjson/jwriter
# github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
## explicit
github.com/mitchellh/colorstring
+# github.com/mitchellh/copystructure v1.0.0
+## explicit
+github.com/mitchellh/copystructure
+# github.com/mitchellh/reflectwalk v1.0.0
+## explicit
+github.com/mitchellh/reflectwalk
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent
@@ -336,15 +351,21 @@ github.com/prometheus/procfs/internal/util
# github.com/rivo/uniseg v0.4.7
## explicit; go 1.18
github.com/rivo/uniseg
-# github.com/schollz/progressbar/v3 v3.14.3
+# github.com/schollz/progressbar/v3 v3.14.5
## explicit; go 1.13
github.com/schollz/progressbar/v3
# github.com/sergi/go-diff v1.3.1
## explicit; go 1.12
github.com/sergi/go-diff/diffmatchpatch
+# github.com/shopspring/decimal v1.2.0
+## explicit; go 1.13
+github.com/shopspring/decimal
# github.com/skeema/knownhosts v1.2.1
## explicit; go 1.17
github.com/skeema/knownhosts
+# github.com/spf13/cast v1.3.1
+## explicit
+github.com/spf13/cast
# github.com/spf13/cobra v1.8.0
## explicit; go 1.15
github.com/spf13/cobra
@@ -456,6 +477,7 @@ go.uber.org/zap/zapgrpc
# golang.org/x/crypto v0.18.0
## explicit; go 1.18
golang.org/x/crypto/argon2
+golang.org/x/crypto/bcrypt
golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
@@ -465,6 +487,8 @@ golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/hkdf
golang.org/x/crypto/internal/alias
golang.org/x/crypto/internal/poly1305
+golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/scrypt
golang.org/x/crypto/sha3
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
@@ -499,7 +523,7 @@ golang.org/x/oauth2/internal
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.21.0
+# golang.org/x/sys v0.22.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -507,7 +531,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.21.0
+# golang.org/x/term v0.22.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.14.0