Skip to content

Commit

Permalink
Bootstrap crc in github action (#1202)
Browse files Browse the repository at this point in the history
Signed-off-by: PuneetPunamiya <ppunamiy@redhat.com>
  • Loading branch information
PuneetPunamiya authored Nov 5, 2024
1 parent 1e43c8c commit 227b311
Show file tree
Hide file tree
Showing 4 changed files with 303 additions and 0 deletions.
76 changes: 76 additions & 0 deletions .github/workflows/test-on-crc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
name: Test Chains on CRC
on:
  push:
    branches:
      - main

jobs:
  build:
    name: Run E2E Tests on CRC
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        go:
          - '1.22'
    env:
      SHELL: /bin/bash
      # Fix: the runner is ubuntu-latest, whose home is /home/runner —
      # the previous '/Users/runner/...' is a macOS path and never exists here.
      # (This value is later superseded by the KUBECONFIG written to GITHUB_ENV.)
      KUBECONFIG: '/home/runner/.kube/config'

    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v4
        with:
          ref: ${{ github.sha }}
      - uses: imjasonh/setup-ko@v0.7
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: ${{ matrix.go }}
      - name: Download and install CRC
        run: |
          wget "https://developers.redhat.com/content-gateway/file/pub/openshift-v4/clients/crc/2.40.0/crc-linux-amd64.tar.xz"
          tar -xf crc-linux-amd64.tar.xz --directory /usr/local/bin --strip-components=1 crc-linux-2.40.0-amd64/crc
      - name: Testing CRC
        run: |
          which crc
          crc --help
      - name: Install required virtualization software
        run: |
          sudo apt-get update
          # Fix: -y is required — without it apt prompts for confirmation and
          # aborts in the non-interactive CI environment.
          sudo apt-get install -y qemu-kvm libvirt-daemon libvirt-daemon-system
          sudo usermod -a -G libvirt $USER
      - name: Install yq
        run: |
          sudo wget https://github.com/mikefarah/yq/releases/download/v4.31.2/yq_linux_amd64 -O /usr/local/bin/yq
          sudo chmod +x /usr/local/bin/yq
          yq --version
      - name: Set the crc config
        env:
          PULL_SECRET_CONTENT: ${{ secrets.CRC_TOKEN }}
        run: |
          crc config set preset microshift
          echo "$PULL_SECRET_CONTENT" > pull-secret
          crc config set pull-secret-file pull-secret
          crc config set network-mode user
          crc config set memory 14000
      - name: Setup the crc
        run: sudo -su $USER crc setup
      - name: Start the crc
        run: sudo -su $USER crc start
      - name: Set Creds
        run: |
          sudo -su $USER crc oc-env
          echo "KUBECONFIG=$HOME/.crc/machines/crc/kubeconfig" >> $GITHUB_ENV
      - name: Install Tekton Pipelines and Chains
        run: |
          # To deploy on Openshift
          oc adm policy add-scc-to-user anyuid -z tekton-pipelines-controller
          oc adm policy add-scc-to-user anyuid -z tekton-pipelines-webhook
          chmod +x test/crc_test.sh
          ./test/crc_test.sh
172 changes: 172 additions & 0 deletions test/crc_test.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,172 @@
#!/usr/bin/env bash

# Namespace that Tekton Chains is deployed into; override with NAMESPACE.
export namespace="${NAMESPACE:-tekton-chains}"

# start_registry runs a local Docker registry (registry:2) on
# 127.0.0.1:${REG_PORT} unless a container named ${REG_NAME} is already up.
# NOTE(review): assumes REG_NAME and REG_PORT are exported by the caller.
function start_registry() {
  running="$(docker inspect -f '{{.State.Running}}' ${REG_NAME} 2>/dev/null || echo false)"

  if [[ ${running} != "true" ]];then
    # Fix: remove the container actually named ${REG_NAME} — the previous
    # hardcoded "kind-registry" diverged from the name inspected above.
    docker rm -f "${REG_NAME}" || true
    docker run \
      -d --restart=always -p "127.0.0.1:${REG_PORT}:5000" \
      -e REGISTRY_HTTP_SECRET=secret \
      --name "${REG_NAME}" \
      registry:2
  fi
}

# install_chains deploys Tekton Chains built from the local config/ manifests
# and waits for its pods to come up in ${namespace}.
function install_chains() {
  echo ">> Deploying Tekton Chains"
  export KO_DOCKER_REPO=ttl.sh
  ko apply -f config/ || fail_test "Tekton Chains installation failed"
  # Re-resolve the manifests, then strip runAsUser/runAsGroup so the pods pass
  # OpenShift's SCC admission before re-applying with oc.
  ko resolve -f config > release.yaml
  yq 'del(.spec.template.spec.containers[].securityContext.runAsUser, .spec.template.spec.containers[].securityContext.runAsGroup)' release.yaml | oc apply -f -

  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running ${namespace} || fail_test "Tekton Chains did not come up"
}


# spire_apply creates a SPIRE registration entry inside the spire-server pod,
# deleting any existing entry for the same spiffeID first (idempotent upsert).
# Usage: spire_apply -spiffeID <id> [additional `entry create` args...]
function spire_apply() {
  # First two args must be "-spiffeID <id>".
  # (The `-o` operator inside [ ] is obsolescent POSIX but valid in bash.)
  if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then
    echo "spire_apply requires a spiffeID as the first arg" >&2
    exit 1
  fi
  # Query existing entries for this spiffeID only ($1 $2), not the full args.
  show=$(kubectl exec -n spire spire-server-0 -c spire-server -- \
    /opt/spire/bin/spire-server entry show $1 $2)
  if [ "$show" != "Found 0 entries" ]; then
    # delete to recreate
    entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:)
    kubectl exec -n spire spire-server-0 -c spire-server -- \
      /opt/spire/bin/spire-server entry delete -entryID $entryid
  fi
  kubectl exec -n spire spire-server-0 -c spire-server -- \
    /opt/spire/bin/spire-server entry create "$@"
}

# install_spire deploys a SPIRE server/agent into the "spire" namespace and
# registers the node plus the Chains controller workload with it.
function install_spire() {
  echo ">> Deploying Spire"
  # --dry-run | apply makes namespace creation idempotent.
  kubectl create ns spire --dry-run=client -o yaml | kubectl apply -f -
  # OpenShift: allow the spire pods to run with anyuid.
  oc adm policy add-scc-to-user anyuid -z spire-agent -n spire
  oc adm policy add-scc-to-user anyuid -z spire-server -n spire
  kubectl -n spire apply -f "test/testdata/spire.yaml"
  wait_until_pods_running spire || fail_test "Spire did not come up"
  # Node registration entry (agent attestation via k8s_psat selectors).
  spire_apply \
    -spiffeID spiffe://example.org/ns/spire/node/example \
    -selector k8s_psat:cluster:example \
    -selector k8s_psat:agent_ns:spire \
    -selector k8s_psat:agent_sa:spire-agent \
    -node
  # Workload entry for the Chains controller service account.
  spire_apply \
    -spiffeID spiffe://example.org/ns/${namespace}/sa/tekton-chains-controller \
    -parentID spiffe://example.org/ns/spire/node/example \
    -selector k8s:ns:${namespace} \
    -selector k8s:sa:tekton-chains-controller
}

# vault_exec runs a `vault` CLI command inside the vault-0 pod, injecting
# ROOT_TOKEN as VAULT_TOKEN when it is set.
function vault_exec() {
  envcmd=""
  [ -n "$ROOT_TOKEN" ] && envcmd="env VAULT_TOKEN=$ROOT_TOKEN"
  # $envcmd is deliberately unquoted: it must expand to zero words when empty,
  # or to the two words `env VAULT_TOKEN=...` when a token is present.
  kubectl exec -i -n vault vault-0 -- $envcmd vault "$@"
}

# install_vault deploys Vault, enables the transit and jwt engines, wires jwt
# auth to the SPIRE OIDC provider, and exports the e2e transit public key to
# test/testdata/vault.pub. Every step is guarded so reruns are idempotent.
function install_vault() {
  echo ">> Deploying Vault"
  kubectl create ns vault --dry-run=client -o yaml | kubectl apply -f -
  oc adm policy add-scc-to-user anyuid -z vault -n vault
  kubectl -n vault apply -f test/testdata/vault.yaml
  wait_until_pods_running vault || fail_test "Vault did not come up"
  # Dev-mode root token used by vault_exec for all subsequent calls.
  ROOT_TOKEN=token12345
  vault_exec secrets list 2>&1 | grep "^transit/" \
    || vault_exec secrets enable transit
  vault_exec auth list 2>&1 | grep "^jwt/" \
    || vault_exec auth enable jwt
  # Trust SPIFFE JWT-SVIDs issued by the SPIRE OIDC discovery endpoint.
  vault_exec read auth/jwt/config >/dev/null 2>&1 \
    || vault_exec write auth/jwt/config \
    oidc_discovery_url=http://spire-oidc.spire:8082 \
    default_role="spire"
  # Policy granting sign/verify on the e2e transit key.
  # NOTE: heredoc body is the literal HCL payload — keep it unindented.
  vault_exec policy read spire-transit >/dev/null 2>&1 \
    || vault_exec policy write spire-transit - <<EOF
path "transit/*" {
capabilities = ["read"]
}
path "transit/sign/e2e" {
capabilities = ["create", "read", "update"]
}
path "transit/sign/e2e/*" {
capabilities = ["read", "update"]
}
path "transit/verify/e2e" {
capabilities = ["create", "read", "update"]
}
path "transit/verify/e2e/*" {
capabilities = ["read", "update"]
}
EOF
  # Role binding the Chains controller's SPIFFE ID to the spire-transit policy.
  vault_exec read auth/jwt/role/spire-chains-controller >/dev/null 2>&1 \
    || vault_exec write auth/jwt/role/spire-chains-controller \
    role_type=jwt \
    user_claim=sub \
    bound_audiences=e2e \
    bound_subject=spiffe://example.org/ns/${namespace}/sa/tekton-chains-controller \
    token_ttl=15m \
    token_policies=spire-transit
  vault_exec read transit/keys/e2e >/dev/null 2>&1 \
    || vault_exec write transit/keys/e2e type=ecdsa-p521
  # Export the public half of the e2e key for signature verification in tests.
  vault_exec read -format=json transit/keys/e2e \
    | jq -r .data.keys.\"1\".public_key >"test/testdata/vault.pub"
}

# chains_patch_spire patches the Chains controller deployment with the SPIRE
# settings from chains-patch-spire.json and waits for the rollout to settle.
function chains_patch_spire() {
  patch_file="test/testdata/chains-patch-spire.json"
  kubectl patch deployment tekton-chains-controller -n ${namespace} \
    --patch-file "${patch_file}"
  # Wait for pods to be running in the namespaces we are deploying to
  wait_until_pods_running ${namespace} || fail_test "Tekton Chains did not come up after patching"
}

# wait_until_pods_running polls namespace $1 (every 2s, up to 5 minutes) until
# every pod is Running/Completed and each READY column reports x/y with
# x == y >= 1. Returns 0 once all pods are ready, 1 on timeout.
function wait_until_pods_running() {
  echo -n "Waiting until all pods in namespace $1 are up"
  for i in {1..150}; do # timeout after 5 minutes
    local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
    # All pods must be running
    local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
    if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
      local all_ready=1
      while read pod ; do
        # Split the READY column ("x/y") into status[0]=ready, status[1]=total.
        local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
        # All containers must be ready
        [[ -z ${status[0]} ]] && all_ready=0 && break
        [[ -z ${status[1]} ]] && all_ready=0 && break
        [[ ${status[0]} -lt 1 ]] && all_ready=0 && break
        [[ ${status[1]} -lt 1 ]] && all_ready=0 && break
        [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
      # Fix: quote the command substitution so the multi-line pod list reaches
      # the loop verbatim (defensive against word splitting; matches the
      # upstream knative helper this function derives from).
      done <<< "$(echo "${pods}" | grep -v Completed)"
      if (( all_ready )); then
        echo -e "\nAll pods are up:\n${pods}"
        return 0
      fi
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
  return 1
}

# Install the latest Tekton Pipelines release, stripping runAsUser/runAsGroup
# so the controller pods satisfy OpenShift's SCC admission.
# Fix: -f makes curl fail on HTTP errors instead of piping an error page into
# yq; -sSL silences the progress bar but keeps errors and follows redirects.
curl -fsSL https://storage.googleapis.com/tekton-releases/pipeline/latest/release.notags.yaml | yq 'del(.spec.template.spec.containers[].securityContext.runAsUser, .spec.template.spec.containers[].securityContext.runAsGroup)' | oc apply -f -

start_registry

install_chains

install_spire

install_vault

chains_patch_spire

# Signal the e2e suite that it is running against a local OpenShift (CRC)
# cluster, then run it with the CRC kubeconfig.
export GCE_METADATA_HOST=localhost
export OPENSHIFT=localhost
go test -v -count=1 -tags=e2e -timeout=35m ./test/... --kubeconfig $HOME/.crc/machines/crc/kubeconfig
41 changes: 41 additions & 0 deletions test/e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,8 @@ import (

var namespace string

const localhost string = "localhost"

func init() {
namespace = os.Getenv("namespace")
if namespace == "" {
Expand Down Expand Up @@ -229,6 +231,13 @@ func TestOCISigning(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
ctx := logtesting.TestContextWithLogger(t)
c, ns, cleanup := setup(ctx, t, test.opts)
OPENSHIFT := os.Getenv("OPENSHIFT")
if OPENSHIFT == localhost {
if err := assignSCC(ns); err != nil {
t.Fatalf("error creating scc: %s", err)
}
}

t.Cleanup(cleanup)

// Setup the right config.
Expand Down Expand Up @@ -434,6 +443,13 @@ func TestOCIStorage(t *testing.T) {
// create necessary resources
imageName := "chains-test-oci-storage"
image := fmt.Sprintf("%s/%s", c.internalRegistry, imageName)

OPENSHIFT := os.Getenv("OPENSHIFT")
if OPENSHIFT == localhost {
if err := assignSCC(ns); err != nil {
t.Fatalf("error creating scc: %s", err)
}
}
task := kanikoTask(t, ns, image)
if _, err := c.PipelineClient.TektonV1().Tasks(ns).Create(ctx, task, metav1.CreateOptions{}); err != nil {
t.Fatalf("error creating task: %s", err)
Expand Down Expand Up @@ -505,6 +521,13 @@ func TestMultiBackendStorage(t *testing.T) {
registry: true,
kanikoTaskImage: image,
})

OPENSHIFT := os.Getenv("OPENSHIFT")
if OPENSHIFT == localhost {
if err := assignSCC(ns); err != nil {
t.Fatalf("error creating scc: %s", err)
}
}
t.Cleanup(cleanup)

resetConfig := setConfigMap(ctx, t, c, test.cm)
Expand Down Expand Up @@ -579,6 +602,14 @@ func TestRetryFailed(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
ctx := logtesting.TestContextWithLogger(t)
c, ns, cleanup := setup(ctx, t, test.opts)

OPENSHIFT := os.Getenv("OPENSHIFT")
if OPENSHIFT == localhost {
if err := assignSCC(ns); err != nil {
t.Fatalf("error creating scc: %s", err)
}
}

t.Cleanup(cleanup)

resetConfig := setConfigMap(ctx, t, c, test.cm)
Expand Down Expand Up @@ -859,8 +890,18 @@ func TestProvenanceMaterials(t *testing.T) {
}

func TestVaultKMSSpire(t *testing.T) {
OPENSHIFT := os.Getenv("OPENSHIFT")
if OPENSHIFT == localhost {
t.Skip("Skipping, vault kms spire integration tests .")
}
ctx := logtesting.TestContextWithLogger(t)
c, ns, cleanup := setup(ctx, t, setupOpts{})
if OPENSHIFT == localhost {
if err := assignSCC(ns); err != nil {
t.Fatalf("error creating scc: %s", err)
}
}

t.Cleanup(cleanup)

resetConfig := setConfigMap(ctx, t, c, map[string]string{
Expand Down
14 changes: 14 additions & 0 deletions test/kaniko.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package test

import (
"fmt"
"os/exec"
"testing"

"github.com/google/go-containerregistry/pkg/name"
Expand Down Expand Up @@ -131,6 +132,19 @@ func kanikoTask(t *testing.T, namespace, destinationImage string) *v1.Task {
}
}

// assignSCC grants the "anyuid" SCC to the default service account in the
// given namespace by shelling out to the `oc` CLI. It returns an error that
// wraps the exec failure and includes the command's combined output.
func assignSCC(namespace string) error {
	args := []string{"adm", "policy", "add-scc-to-user", "anyuid", "-z", "default", "-n", namespace}
	// CombinedOutput captures stdout+stderr so failures carry oc's diagnostics.
	if out, err := exec.Command("oc", args...).CombinedOutput(); err != nil {
		return fmt.Errorf("failed to assign SCC: %w, output: %s", err, out)
	}
	return nil
}

func verifyKanikoTaskRun(namespace, destinationImage, publicKey string) objects.TektonObject {
script := `#!/busybox/sh
Expand Down

0 comments on commit 227b311

Please sign in to comment.