Skip to content

Update GKE development tooling to 1.12 #887

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jul 10, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion build/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,6 @@ See the table below for available customizations :
|---------------------------------------|-------------------------------------------------------------------------------|---------------|
| `GCP_CLUSTER_NAME` | The name of the cluster | `test-cluster` |
| `GCP_CLUSTER_ZONE` | The name of the Google Compute Engine zone in which the cluster will reside. | `us-west1-c` |
| `GCP_CLUSTER_LEGACYABAC` | Enables or disables the [ABAC](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters#LegacyAbac) authorization mechanism on a cluster. | `false` |
| `GCP_CLUSTER_NODEPOOL_INITIALNODECOUNT`| The number of nodes to create in this cluster. | `3` |
| `GCP_CLUSTER_NODEPOOL_MACHINETYPE` | The name of a Google Compute Engine machine type. | `n1-standard-4` |

Expand Down
2 changes: 1 addition & 1 deletion build/build-image/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ RUN go get -u golang.org/x/tools/cmd/goimports
RUN gcloud components update && gcloud components install app-engine-go

# overwrite kubectl as we want a specific version
ENV KUBECTL_VER 1.11.5
ENV KUBECTL_VER 1.12.10
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VER}/bin/linux/amd64/kubectl && \
chmod go+rx ./kubectl && \
mv ./kubectl /usr/local/bin/kubectl
Expand Down
8 changes: 4 additions & 4 deletions build/gke-test-cluster/cluster-e2e.yml
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,10 @@ resources:
cluster:
name: e2e-test-cluster
description: End to end tests cluster for Agones
initialClusterVersion: "1.11"
initialClusterVersion: "1.12"
nodePools:
- name: "default"
initialNodeCount: 4
initialNodeCount: 8

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

8*4 = 32, which I believe is larger than the default per-zone quota for new projects. Do we need to bump this for individuals to run e2e tests? I know the shared e2e cluster has 8 nodes, but in my (very limited) testing with my own e2e cluster, 4 seemed sufficient.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So this is for the agones-images project, not really for individuals (although they are welcome to use it too). We run a higher parallel rate (32 from memory), and need the extra room.

In fact, given that e2e now takes 8 minutes, we could bump that up a bit too - hence the extra space.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, I didn't realize that this was only used for e2e. The e2e cluster already has 8 nodes, so this just makes that consistent.

config:
machineType: n1-standard-4
tags:
Expand All @@ -34,10 +34,10 @@ resources:
- https://www.googleapis.com/auth/service.management.readonly
- https://www.googleapis.com/auth/servicecontrol
- https://www.googleapis.com/auth/trace.append
- name: game-server-firewall
- name: gke-game-server-firewall
type: compute.beta.firewall
properties:
name: game-server
name: gke-game-server-firewall
description: "Firewall to allow game server udp traffic"
targetTags:
- "game-server"
Expand Down
7 changes: 1 addition & 6 deletions build/gke-test-cluster/cluster.yml.jinja
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ resources:
cluster:
name: {{ properties["cluster.name"] }}
description: Test cluster for Agones
initialClusterVersion: "1.11"
initialClusterVersion: "1.12"
nodePools:
- name: "default"
initialNodeCount: {{ properties["cluster.nodePool.initialNodeCount"] }}
Expand Down Expand Up @@ -70,11 +70,6 @@ resources:
- key: "stable.agones.dev/agones-metrics"
value: "true"
effect: "NO_EXECUTE"
masterAuth:
username: admin
password: supersecretpassword
legacyAbac:
enabled: {{ properties["cluster.legacyAbac"] }}
- name: game-server-firewall
type: compute.beta.firewall
properties:
Expand Down
9 changes: 4 additions & 5 deletions build/includes/google-cloud.mk
Original file line number Diff line number Diff line change
Expand Up @@ -24,13 +24,12 @@ gcloud-init: ensure-build-config
docker run --rm -it $(common_mounts) $(build_tag) gcloud init

# Creates and authenticates a small, 6 node GKE cluster to work against (2 nodes are used for agones-metrics and agones-system)
gcloud-test-cluster: GCP_CLUSTER_LEGACYABAC ?= false
gcloud-test-cluster: GCP_CLUSTER_NODEPOOL_INITIALNODECOUNT ?= 4
gcloud-test-cluster: GCP_CLUSTER_NODEPOOL_MACHINETYPE ?= n1-standard-4
gcloud-test-cluster: $(ensure-build-image)
docker run --rm -it $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) gcloud \
deployment-manager deployments create $(GCP_CLUSTER_NAME) \
--properties cluster.zone:$(GCP_CLUSTER_ZONE),cluster.name:$(GCP_CLUSTER_NAME),cluster.nodePool.initialNodeCount:$(GCP_CLUSTER_NODEPOOL_INITIALNODECOUNT),cluster.nodePool.machineType:$(GCP_CLUSTER_NODEPOOL_MACHINETYPE),cluster.legacyAbac:$(GCP_CLUSTER_LEGACYABAC)\
--properties cluster.zone:$(GCP_CLUSTER_ZONE),cluster.name:$(GCP_CLUSTER_NAME),cluster.nodePool.initialNodeCount:$(GCP_CLUSTER_NODEPOOL_INITIALNODECOUNT),cluster.nodePool.machineType:$(GCP_CLUSTER_NODEPOOL_MACHINETYPE)\
--template=$(mount_path)/build/gke-test-cluster/cluster.yml.jinja
$(MAKE) gcloud-auth-cluster
$(MAKE) setup-test-cluster
Expand All @@ -45,7 +44,7 @@ gcloud-e2e-test-cluster: $(ensure-build-image)
docker run --rm -it $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) gcloud \
deployment-manager deployments create e2e-test-cluster \
--config=$(mount_path)/build/gke-test-cluster/cluster-e2e.yml
GCP_CLUSTER_NAME=e2e-test-cluster GCP_CLUSTER_ZONE=us-west1-c $(MAKE) gcloud-auth-cluster
$(MAKE) gcloud-auth-cluster GCP_CLUSTER_NAME=e2e-test-cluster GCP_CLUSTER_ZONE=us-west1-c
docker run --rm $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) \
kubectl apply -f $(mount_path)/build/helm.yaml
docker run --rm $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) \
Expand All @@ -55,9 +54,9 @@ gcloud-e2e-test-cluster: $(ensure-build-image)

# Deletes the gcloud e2e cluster and cleanup any left pvc volumes
clean-gcloud-e2e-test-cluster: $(ensure-build-image)
docker run --rm $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) \
-docker run --rm $(common_mounts) $(DOCKER_RUN_ARGS) $(build_tag) \
helm delete --purge consul && kubectl delete pvc -l component=consul-consul
GCP_CLUSTER_NAME=e2e-test-cluster $(MAKE) clean-gcloud-test-cluster
$(MAKE) clean-gcloud-test-cluster GCP_CLUSTER_NAME=e2e-test-cluster

# Pulls down authentication information for kubectl against a cluster, name can be specified through GCP_CLUSTER_NAME
# (defaults to 'test-cluster')
Expand Down