Type `gcloud [command] --help` or `gcloud help [command]` to get help on any gcloud command.
More info and reference.
To install on Linux: `curl https://sdk.cloud.google.com | bash`
- `gcloud auth login --no-launch-browser` - authenticate/initialize the gcloud environment (configuration) without starting a browser (the link is displayed in the console)
- `gcloud auth activate-service-account --key-file credentials.json` - authenticate gcloud as a service account using a service account credentials file
- `gcloud config configurations activate default` - activate a different (named) configuration
- `gcloud components list` - list gcloud components (installed and not installed)
- `gcloud components install beta` - install some component (beta)
- `gcloud components update beta` - update some component (beta)
- `gcloud auth list` - list the active account name
- `gcloud auth configure-docker` - register gcloud as a Docker credential helper
- `gcloud auth activate-service-account --key-file credentials.json` - activate a service account using a json key file
- `gcloud config list project` - list the project ID
- `gcloud config list [--all]` - view configuration in the environment
- `gcloud config set project myProjectId` - change the project (set project id)
- `gcloud config set compute/zone` - set default zone
- `gcloud config set compute/region` - set default region
- `gcloud config get-value compute/region` - get default region
- `gcloud config get-value compute/zone` - get default zone
- `gcloud config get-value project` - get project id
- `gcloud components list` - list components
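Named configurations make it easy to switch between projects or environments; a minimal sketch (the configuration name `dev` and the project/region/zone values are placeholders):

```
gcloud config configurations create dev        # create and switch to a new named configuration
gcloud config set project myProjectId          # defaults are stored per configuration
gcloud config set compute/region us-central1
gcloud config set compute/zone us-central1-a
gcloud config configurations list              # see all configurations and which one is active
gcloud config configurations activate default  # switch back
```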
- `gcloud compute zones list` - list Google Cloud zones
- `gcloud compute instances list` - list compute instances
- `gcloud compute instances list --filter="name=('somename')"` - list compute instances with filtering
- `gcloud compute instances move` - move instance between zones (within a region)
- `gcloud beta compute reset-windows-password machinename` - reset windows password of the machine
- `gcloud compute firewall-rules list` - list firewall rules
- `gcloud compute firewall-rules list --filter="network='default' AND ALLOW:'icmp'"` - list firewall rules with filtering
- `gcloud compute firewall-rules list --filter=ALLOW:'80'` - list firewall rules with filtering
- `gcloud compute firewall-rules create default-allow-http --direction=INGRESS --priority=1000 --network=default --action=ALLOW --rules=tcp:80 --source-ranges=0.0.0.0/0 --target-tags=http-server` - create a firewall rule to allow http for instances with the http-server tag
- `gcloud compute project-info describe --project $(gcloud config get-value project)` - get info about project
- `gcloud compute instances create somevmname --machine-type e2-medium --zone` - create a new virtual machine instance
- `gcloud compute instances create somevmname --zone=us-east1-c --machine-type=e2-micro --subnet=somesubnet` - create a new virtual machine instance on a specific subnet
- `gcloud compute instances create somevmname --service-account someServiceAccountEmail --scopes "https://www.googleapis.com/auth/compute"` - create a new virtual machine with a service account and scope
- `gcloud compute instances set-machine-type somevmname --machine-type custom-4-3840` - change machine type of the instance
- `gcloud compute instances add-tags somevmname --tags http-server,https-server` - add tags to a virtual machine instance
- `gcloud compute instances list --filter=name:gcelab2 --format='value(EXTERNAL_IP)'` - list instances with filtering and formatting
- `gcloud compute instances list --sort-by=ZONE` - list instances sorted by zone
- `gcloud compute instances delete someInstance` - delete an instance
- `gcloud compute instance-templates create templateName --source-instance=instanceName` - create a template from a stopped instance
- `gcloud compute instance-templates list` - list instance templates
- `gcloud compute ssh somevmname --zone us-central1-c` - start ssh connection to a virtual machine
- `gcloud compute ssh vm-internal --zone us-central1-c --tunnel-through-iap` - start ssh connection to a virtual machine without a public IP via IAP (gcloud automatically falls back to this when the instance has no external IP, even if the flag is not specified)
- `gcloud compute scp index.html first-vm:index.nginx-debian.html --zone=us-central1-c` - copy a file to a linux vm instance
- `gcloud compute instances get-serial-port-output instance-1` - check status of instance startup
- `gcloud compute reset-windows-password [instance] --zone us-east1-b --user [username]` - reset and fetch new Windows password
- `gcloud logging logs list` - view available logs
- `gcloud logging logs list --filter="compute"` - view available logs with filtering
- `gcloud logging read "resource.type=gce_instance" --limit 5` - read the logs related to the resource type gce_instance
- `gcloud logging read "resource.type=gce_instance AND labels.instance_name='gcelab2'" --limit 5` - read the logs specific to a virtual machine
- `gcloud container clusters create hello-world --num-nodes 2 --machine-type n1-standard-1 --zone us-central1-a [--scopes "https://www.googleapis.com/auth/source.read_write,cloud-platform"]` - create GKE cluster with 2 nodes/machines (extra scope for access to Cloud Source Repositories and Google Container Registry)
- `gcloud container clusters get-credentials lab-cluster` - get authentication credentials for GKE cluster
- `gcloud container clusters delete lab-cluster` - delete GKE cluster
- `gcloud container clusters get-credentials lab-cluster` - re-authenticate gcloud shell with the GKE cluster
- `gcloud container node-pools create someNodePool --cluster=hello-demo-cluster --machine-type=e2-standard-2 --num-nodes=1 --zone=us-central1-a` - create a new node pool for a cluster
- `gcloud container node-pools delete someNodePool --cluster hello-demo-cluster --zone us-central1-a` - delete a node pool
- `gcloud beta container clusters update scaling-demo --enable-autoscaling --min-nodes 1 --max-nodes 5` - enable cluster autoscaling
- `gcloud beta container clusters update scaling-demo --autoscaling-profile optimize-utilization` - set autoscaling profile
- `gcloud container clusters update scaling-demo --enable-autoprovisioning --min-cpu 1 --min-memory 2 --max-cpu 45 --max-memory 160` - enable cluster node autoprovisioning
- `gcloud compute addresses create network-lb-ip-1 --region us-central1` - create static ip address
- `gcloud compute http-health-checks create basic-check` - create legacy HTTP health check resource
- `gcloud compute target-pools create www-pool --region us-central1 --http-health-check basic-check` - create target pool with check resource
- `gcloud compute target-pools add-instances www-pool --instances www1,www2,www3` - add instances to the pool
- `gcloud compute forwarding-rules create www-rule --region us-central1 --ports 80 --address network-lb-ip-1 --target-pool www-pool` - forward traffic from the ip to the pool
- `gcloud compute forwarding-rules describe www-rule --region us-central1` - see info about forwarding rule (including external ip)
- `IPADDRESS=$(gcloud compute forwarding-rules describe www-rule --region us-central1 --format="json" | jq -r .IPAddress)` - store the ip address into a variable
- `gcloud compute instance-groups managed create lb-backend-group --template=lb-backend-template --size=2 --zone=us-central1-f --base-instance-name some-name` - create managed instance group based on a template
- `gcloud compute instance-groups managed set-named-ports web-server-group --region=us-east1 --named-ports http:80` - set named ports on a managed group
- `gcloud compute instance-groups managed rolling-action replace web-server-group --max-unavailable 50%` - rolling restart of all instances in a group (at least 50% of members stay up while the others restart)
- `gcloud compute instance-groups managed rolling-action start-update web-server-group --version template=new-instance-template` - update/restart instance group to use a new template
- `gcloud compute instance-groups managed set-autoscaling lb-backend-group --max-num-replicas 2 --target-load-balancing-utilization 0.60` - set autoscaling policy to create more instances if above 60% utilization
- `gcloud compute instance-groups list-instances lb-backend-group` - list instances in a group
- `gcloud compute addresses create lb-ipv4-1 --ip-version=IPV4 --global` - create global static external IP address
- `gcloud compute addresses describe lb-ipv4-1 --format="get(address)" --global` - describe (and get IP from) a global static external IP address
- `gcloud compute health-checks create http http-basic-check --port 80` - create a health check for the load balancer
- `gcloud compute backend-services create web-backend-service --protocol=HTTP --port-name=http --health-checks=http-basic-check --global` - create backend service
- `gcloud compute backend-services create web-backend-service --http-health-checks web-backend-frontend-check --port-name frontend --global` - create backend service (with a legacy HTTP health check)
- `gcloud compute backend-services add-backend web-backend-service --instance-group=lb-backend-group --instance-group-zone=us-central1-f --global` - add instance group as the backend to the backend service
- `gcloud compute backend-services get-health web-backend-service --global` - get health state of the backend service
- `gcloud compute backend-services update web-backend-service --enable-cdn --global` - enable usage of caching CDN for a backend service
- `gcloud compute backend-services list` - list backend services
- `gcloud compute backend-services get-health web-backend-service --global` - get health status of backend service
- `gcloud compute url-maps create web-map-http --default-service web-backend-service` - create a URL map to route the incoming requests to the default backend service
- `gcloud compute url-maps add-path-matcher web-map-http --default-service web-backend-service --path-matcher-name matcherName --path-rules "/api1=other-backend-service,/api2=other-backend-service2"` - modify the url map with a matcher rule routing some paths to other services
- `gcloud compute target-http-proxies create http-lb-proxy --url-map web-map-http` - create a target HTTP proxy to route requests to your URL map
- `gcloud compute forwarding-rules create http-content-rule --address=lb-ipv4-1 --global --target-http-proxy=http-lb-proxy --ports=80` - create global forwarding rule to route incoming requests to the proxy
- `gcloud functions deploy somefunctionname --stage-bucket [bucket_name] --trigger-topic some_pub_sub_topic --runtime nodejs8` - create a new cloud function (--trigger-bucket or --trigger-http are also used)
- `gcloud functions describe somefunctionname` - get info about a cloud function
- `gcloud functions call somefunctionname --data '{"data":"ABCD"}'` - invoke some cloud function
- `gcloud functions logs read somefunctionname` - examine cloud function logs
- `gcloud pubsub topics create sometopic` - create a pub/sub topic
- `gcloud pubsub topics delete sometopic` - delete a pub/sub topic
- `gcloud pubsub topics list` - list all pub/sub topics
- `gcloud pubsub subscriptions create --topic sometopic somesubscription` - create a pub/sub subscription
- `gcloud pubsub topics list-subscriptions sometopic` - list subscriptions on a pub/sub topic
- `gcloud pubsub topics publish sometopic --message "Hello"` - publish a message to a pub/sub topic
- `gcloud pubsub subscriptions pull --auto-ack somesubscription` - pull ONE message from a pub/sub subscription
- `gcloud pubsub subscriptions pull somesubscription --auto-ack --limit=3` - pull 3 messages from a pub/sub subscription
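Putting the Pub/Sub commands together, a minimal end-to-end sketch (topic and subscription names are placeholders):

```
gcloud pubsub topics create sometopic
gcloud pubsub subscriptions create --topic sometopic somesubscription
gcloud pubsub topics publish sometopic --message "Hello"
gcloud pubsub subscriptions pull somesubscription --auto-ack --limit=3
gcloud pubsub subscriptions delete somesubscription
gcloud pubsub topics delete sometopic
```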
- `gcloud compute networks create networkname --subnet-mode=custom --mtu=1460 --bgp-routing-mode=regional` - create a VPC network
- `gcloud compute networks subnets create subnetworkname --range=10.130.0.0/20 --stack-type=IPV4_ONLY --network=networkname --region=us-central1` - create a VPC subnetwork
- `gcloud compute networks list` - view VPC networks
- `gcloud compute networks subnets list --sort-by=NETWORK` - view VPC subnetworks
- `gcloud compute networks subnets update subnet-name --enable-private-ip-google-access` - enable private ip access to Google services
- `gcloud compute firewall-rules create networkname --direction=INGRESS --priority=1000 --network=managementnet --action=ALLOW --rules=tcp:22,tcp:3389,icmp --source-ranges=0.0.0.0/0` - create a firewall rule
- `gcloud compute firewall-rules list --sort-by=NETWORK` - list firewall rules
- `gcloud app create --region=us-central` - create an App Engine application
- `gcloud app deploy app.yaml [--quiet] [--version=two] [--no-promote]` - deploy the application to App Engine (--no-promote tells App Engine to continue serving requests with the old version; see the app.yaml sketch below)
- `gcloud app browse` - open the current App Engine application in a browser (or display the url if a browser is not detected)
- `gcloud app logs tail -s default` - tail logs from the App Engine application
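The `app.yaml` describes the runtime and (optionally) scaling; a minimal sketch for the Python standard environment (the runtime and scaling values are just examples):

```
cat > app.yaml <<'EOF'
runtime: python39
automatic_scaling:
  max_instances: 2
EOF
gcloud app deploy app.yaml --quiet
```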
- `gcloud sql connect myproject-demo --user=root` - connect to a Cloud SQL instance
- `gcloud iam service-accounts keys list --iam-account user@email.com` - list keys associated with a service account
- `gcloud iam roles list` - list available roles
- `gcloud iam roles describe roles/compute.instanceAdmin` - describe (list permissions included in) some role
- `gcloud iam roles create devops --project $PROJECTID2 --permissions "compute.instances.start,compute.instances.stop"` - create custom role with some permissions
- `gcloud iam service-accounts create serviceAccountName --display-name serviceAccountDisplayName` - create service account
- `gcloud iam service-accounts list --format="value(email)" --filter "displayName=serviceAccountDisplayName"` - list service accounts with filtering and formatting
- `gcloud projects add-iam-policy-binding someProjectId --member user:someUserId --role=roles/viewer` - add viewer role for some user on some project
- `gcloud projects add-iam-policy-binding someProjectId --member serviceAccount:someServiceAccountEmail --role=roles/viewer` - add viewer role for some service account on some project
- `gcloud services enable compute.googleapis.com` - enable the Compute Engine API
- `gcloud services enable cloudprofiler.googleapis.com` - enable the Cloud Profiler API
- `gcloud services enable artifactregistry.googleapis.com cloudbuild.googleapis.com run.googleapis.com` - enable the Artifact Registry, Cloud Build, and Cloud Run APIs
- `gcloud services enable container.googleapis.com` - enable the Kubernetes Engine API
- `gcloud compute vpn-gateways create demo-vpn-gw --network some-network --region us-central1` - create HA VPN Gateway
- `gcloud compute vpn-gateways describe demo-vpn-gw --region us-central1` - describe a gateway
- `gcloud compute routers create demo-router --region us-central1 --network some-network --asn 65001` - create Cloud Router
- `gcloud compute routers describe demo-router --region us-central1` - describe a router
- `gcloud compute vpn-tunnels list` - list all tunnels (and peers)
- `gcloud compute vpn-tunnels describe demo-tunnel --region us-central1` - describe a tunnel
- `gcloud builds submit --tag gcr.io/${GOOGLE_CLOUD_PROJECT}/an-image:v0.1 .` - submit a new build job to Cloud Build (the trailing `.` is the source: the current directory)
- `gcloud builds submit --config cloudbuild.yaml .` - submit a new build job configured in a file to Cloud Build (see the cloudbuild.yaml sketch below)
- `gcloud builds submit --tag us-central1-docker.pkg.dev/${GOOGLE_CLOUD_PROJECT}/demo/demo:1.0.0` - submit a build job and store the image in Artifact Registry
- `gcloud builds list` - list all build jobs performed
- `gcloud container images delete gcr.io/${GOOGLE_CLOUD_PROJECT}/an-image:1.0.0 --quiet` - delete the container image for some version
- `gcloud builds list | grep 'SOURCE' | cut -d ' ' -f2 | while read line; do gsutil rm $line; done` - delete all source tarballs stored for gcloud builds
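The `cloudbuild.yaml` used with `gcloud builds submit --config` lists the build steps; a minimal sketch that builds and pushes a Docker image (the image path is just an example):

```
cat > cloudbuild.yaml <<'EOF'
steps:
- name: 'gcr.io/cloud-builders/docker'
  args: ['build', '-t', 'gcr.io/$PROJECT_ID/an-image:v0.1', '.']
images:
- 'gcr.io/$PROJECT_ID/an-image:v0.1'
EOF
gcloud builds submit --config cloudbuild.yaml .
```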
- `gcloud container clusters update multi-tenant-cluster --zone us-central1-a --resource-usage-bigquery-dataset cluster_dataset` - enable GKE usage metering on the cluster
- `gcloud source repos clone devops-repo` - clone Google Cloud repository
- `gcloud source repos create default` - create Google Cloud repository
- `gcloud run deploy demo --image us-central1-docker.pkg.dev/${GOOGLE_CLOUD_PROJECT}/demo/demo:1.0.0 --region us-central1 [--concurrency 100]` - deploy a container to Cloud Run
- `gcloud run services list` - list services deployed in Cloud Run
- `gcloud run services describe demo --platform managed --region us-central1` - describe service deployed in Cloud Run
Create VPN tunnel:
gcloud compute vpn-tunnels create demo-tunnel \
--peer-gcp-gateway peer-gw \
--region us-central1 \
--ike-version 2 \
--shared-secret [SHARED_SECRET] \
--router demo-router \
--vpn-gateway demo-vpn-gw \
--interface 0
Create router interface:
gcloud compute routers add-interface demo-router \
--interface-name if-tunnel0 \
--ip-address 169.254.0.1 \
--mask-length 30 \
--vpn-tunnel demo-tunnel \
--region us-central1
Create BGP peering:
gcloud compute routers add-bgp-peer demo-router \
--peer-name bgp-on-prem-tunnel0 \
--interface if-tunnel0 \
--peer-ip-address 169.254.0.2 \
--peer-asn 65002 \
--region us-central1
Create health check firewall rules (130.211.0.0/22 and 35.191.0.0/16 are the Google Cloud health checking systems):
gcloud compute firewall-rules create fw-allow-health-check \
--network=default \
--action=allow \
--direction=ingress \
--source-ranges=130.211.0.0/22,35.191.0.0/16 \
--target-tags=allow-health-check \
--rules=tcp:80
gcloud compute firewall-rules create allow-health-check \
--allow tcp:8080-8081 \
--source-ranges 130.211.0.0/22,35.191.0.0/16 \
--network default
Create an instance with lots of parameters, including a startup script:
gcloud compute instances create www1 \
--zone=us-central1-f \
--tags=network-lb-tag \
--machine-type=e2-medium \
--image-family=debian-11 \
--image-project=debian-cloud \
--metadata=startup-script='#!/bin/bash
apt-get update
apt-get install apache2 -y
service apache2 restart
echo "
<h3>Web Server: www1</h3>" | tee /var/www/html/index.html'
Create a load balancer instance template with lots of parameters, including fetching the hostname from metadata:
gcloud compute instance-templates create lb-backend-template \
--region=us-central1 \
--network=default \
--subnet=default \
--tags=allow-health-check \
--machine-type=e2-medium \
--image-family=debian-11 \
--image-project=debian-cloud \
--metadata=startup-script='#!/bin/bash
apt-get update
apt-get install apache2 -y
a2ensite default-ssl
a2enmod ssl
vm_hostname="$(curl -H "Metadata-Flavor:Google" \
http://169.254.169.254/computeMetadata/v1/instance/name)"
echo "Page served from: $vm_hostname" | \
tee /var/www/html/index.html
systemctl restart apache2'
Same as above, but with the startup script read from a file:
gcloud compute instance-templates create lb-backend-template \
--region=us-east1 \
--network=default \
--subnet=default \
--tags=allow-health-check \
--machine-type=f1-micro \
--image-family=debian-11 \
--image-project=debian-cloud \
--metadata-from-file startup-script=startup.sh
Same as above, but with the startup script read from a URL:
gcloud compute instances create backend \
--machine-type=n1-standard-1 \
--tags=backend \
--metadata=startup-script-url=https://storage.googleapis.com/somefile.sh
Create an HTTP health check:
gcloud compute health-checks create http health-check-name \
--port 8080 \
--request-path=/api/orders \
--check-interval 30s \
--healthy-threshold 1 \
--timeout 10s \
--unhealthy-threshold 3
Create a legacy HTTP health check:
gcloud compute http-health-checks create fancy-fe-frontend-hc \
--request-path / \
--port 8080
Update a managed instance group with a health check:
gcloud compute instance-groups managed update fancy-be-mig \
--health-check fancy-be-hc \
--initial-delay 300
More info and reference.
- `gsutil cp somefile.jpg gs://[bucket_name]` - copy object to a bucket (upload)
- `gsutil cp -r gs://[bucket_name]/[object_name_path] .` - copy object from bucket (download)
- `gsutil cp -v somefile.jpg gs://[bucket_name]` - copy object to a bucket and print its version-specific URL (upload)
- `gsutil cp gs://[bucket_name]/[object_name_path]#version_id recovered.jpg` - copy a specific version of an object from a bucket (download)
- `gsutil -m cp -r gs://spls/gsp053/orchestrate-with-kubernetes .` - recursively copy with parallel (multi-threaded) transfers (-m)
- `gsutil cp gs://[bucket_name]/[object_name_path] gs://[bucket_name]/[dest_folder_path]/` - copy objects from bucket to bucket
- `gsutil ls gs://[bucket_name]` - list objects in a bucket
- `gsutil ls -l gs://[bucket_name]/[object_name_path]` - list details for an object in a bucket
- `gsutil ls -a gs://[bucket_name]/[object_name_path]` - list all versions of an object
- `gsutil acl set private gs://[bucket_name]/[object_name_path]` - make object private (only creator/owner has access)
- `gsutil acl ch -u AllUsers:R gs://[bucket_name]/[object_name_path]` - make an object accessible to everyone
- `gsutil acl ch -d AllUsers gs://[bucket_name]/[object_name_path]` - remove AllUsers access from an object
- `gsutil rm gs://[bucket_name]/[object_name_path]` - remove object from bucket
- `gsutil mb -p [PROJECT_ID] gs://[bucket_name]` - create new storage bucket
- `gsutil signurl -d 10m path/to/privatekey.p12 gs://[bucket_name]/[object_name_path]` - create a signed URL (valid for 10 minutes) for an object in a bucket
- `gsutil acl get gs://[bucket_name]/[object_name_path]` - show access control list
- `gsutil config -n` - create .boto file (has encryption/decryption keys and lots of other configuration)
- `gsutil rewrite -k gs://[bucket_name]/[object_name_path]` - rewrite with a new encryption_key (using decryption_key1 from .boto)
- `gsutil lifecycle get gs://[bucket_name]` - get lifecycle management rules
- `gsutil lifecycle set life.json gs://[bucket_name]` - set lifecycle management rules from a json file (see the sketch below)
- `gsutil versioning get gs://[bucket_name]` - get versioning rules
- `gsutil versioning set on gs://[bucket_name]` - enable versioning
- `gsutil rsync -r ./somefolder gs://[bucket_name]/somefolder` - synchronize folder with bucket
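The `life.json` used with `gsutil lifecycle set` is plain JSON; a minimal sketch that deletes objects older than 30 days (the age is just an example):

```
cat > life.json <<'EOF'
{
  "rule": [
    {
      "action": {"type": "Delete"},
      "condition": {"age": 30}
    }
  ]
}
EOF
gsutil lifecycle set life.json gs://[bucket_name]
```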
More info and reference.
Run BigQuery query:
bq query --location=us --use_legacy_sql=false --use_cache=false \
'select month, avg(mean_temp) as avgtemp from `qwiklabs-resources.qlweather_geo.gsod`
where station_number = 947680
and year = 2010
group by month
order by month'
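Beyond queries, a few other common bq commands for managing datasets and tables (a minimal sketch; the dataset/table names, schema, and file path are placeholders):

```
bq ls                                                      # list datasets in the current project
bq mk mydataset                                            # create a dataset
bq mk --table mydataset.mytable name:STRING,age:INTEGER    # create a table with a schema
bq load --source_format=CSV mydataset.mytable gs://[bucket_name]/data.csv name:STRING,age:INTEGER
bq show mydataset.mytable                                  # show table schema and details
bq rm -r -f mydataset                                      # delete the dataset and its tables
```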
More info and reference.
- `kubectl create deployment hello-server --image=gcr.io/google-samples/hello-app:1.0` - create application in a GKE cluster
- `kubectl create -f file.yaml` - create application based on a yaml manifest file (see the example manifest below)
- `kubectl expose deployment hello-server --type=LoadBalancer --port 8080` - expose GKE cluster application on a port
- `kubectl scale deployment hello-node --replicas=4` - scale deployment to 4 replicas
- `kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10` - automatically scale deployment between 1 and 10 replicas based on cpu
- `kubectl get service` - show GKE cluster services (internal and external IPs, exposed ports, etc.)
- `kubectl get deployments` - show deployments
- `kubectl get replicasets` - show replica sets (or `kubectl get rs`)
- `kubectl get pods [--show-labels] [-A] [--namespace=someNamespace]` - show pods (-A = all namespaces)
- `kubectl cluster-info` - show info about the cluster
- `kubectl config view` - show cluster config
- `kubectl get events` - show cluster events
- `kubectl logs -f <pod-name>` - show pod logs (-f = follow, stream in realtime)
- `kubectl edit deployment hello-node` - interactively edit the deployment yaml and apply it
- `kubectl get hpa` - see status of Horizontal Pod Autoscaler
- `kubectl get vpa` - see status of Vertical Pod Autoscaler
- `kubectl set image deployment/hello nginx=nginx:1.9.1` - directly set image of the deployment to a new image
- `kubectl set resources deployment hello-server --requests=cpu=450m` - directly set cpu resource request on a deployment
- `kubectl rollout pause deployment/hello` - pause rollout (which happens after deployment is edited)
- `kubectl rollout resume deployment/hello` - resume rollout (which happens after deployment is edited)
- `kubectl rollout history deployment/hello` - show rollout history
- `kubectl rollout history deployment/nginx-deployment --revision=5` - show info about a specific revision in the history
- `kubectl rollout undo deployment/hello` - undo last rollout
- `kubectl rollout status deployment/hello` - monitor rollout
- `kubectl explain deployment --recursive` - show all fields of the deployment resource (full schema)
- `kubectl explain deployment.metadata.name` - explain a specific field of a resource
- `kubectl create configmap nginx-frontend-conf --from-file=nginx/frontend.conf` - create configmap
- `kubectl get pods -o jsonpath --template='{range .items[*]}{.metadata.name}{"\t"}{"\t"}{.spec.containers[0].image}{"\n"}{end}'` - list pods with jsonpath formatting (pod name and first container image)
- `kubectl delete pods healthy-monolith monolith secure-monolith` - delete multiple pods
- `kubectl delete pod monolith` - delete a pod
- `kubectl delete services monolith auth frontend hello` - delete multiple services
- `kubectl delete deployments auth frontend hello hello-canary hello-green` - delete multiple deployments
- `kubectl delete service monolith` - delete a service
- `kubectl delete deployment auth` - delete a deployment
- `kubectl delete secrets tls-certs` - delete a secret
- `kubectl delete configmaps nginx-frontend-conf nginx-proxy-conf` - delete configmaps
- `kubectl describe pods podname` - list pod description, ip addresses, event log, ...
- `kubectl port-forward <pod-name> 10080:80` - set up port forwarding from local machine port 10080 to pod port 80
- `kubectl exec <pod-name> --stdin --tty -c <container-name> -- /bin/sh` - start interactive shell on a pod
- `kubectl create secret generic tls-certs --from-file tls/` - create generic secrets from the files in a folder (tls/)
- `kubectl create configmap nginx-proxy-conf --from-file nginx/proxy.conf` - create config map from a file
- `kubectl label pods pod-name 'secure=enabled'` - add label to a pod
- `kubectl get persistentvolumeclaim` - show current persistent volume claims
- `kubectl completion shellName` - output shell completion code for the specified shell (bash, zsh, fish, or powershell); the code must be evaluated to provide interactive completion of kubectl commands, e.g. by sourcing it from .bash_profile (or `source <(kubectl completion bash)`)
- `kubectl api-resources --namespaced=true` - complete list of namespaced resources
- `kubectl get services --namespace=kube-system` - get services belonging to a namespace
- `kubectl run app-server --image=centos --namespace=team-a -- sleep infinity` - run something inside a namespace
- `kubectl config set-context --current --namespace=team-a` - any subsequent commands will run against the indicated namespace without specifying the --namespace flag
- `kubectl create role pod-reader --resource=pods --verb=watch --verb=get --verb=list` - create an RBAC role
- `kubectl create rolebinding team-a-developers --role=developer --user=team-a-dev@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com` - create a "binding" between an RBAC role and an IAM account
- `kubectl create namespace team-a` - create a namespace
- `kubectl create quota test-quota --hard=count/pods=2,count/services.loadbalancers=1 --namespace=team-a` - create a resource quota
- `kubectl describe quota test-quota --namespace=team-a` - list details about a quota (used and limits)
- `kubectl edit quota test-quota --namespace=team-a` - interactively edit a quota
- `kubectl cordon nodeName` - cordon node (mark as unschedulable)
- `kubectl uncordon nodeName` - uncordon node (mark as schedulable again)
- `kubectl drain --force --ignore-daemonsets --delete-local-data --grace-period=10 nodeName` - drain node (evict workloads from it)
- `kubectl create poddisruptionbudget kube-dns-pdb --namespace=kube-system --selector k8s-app=kube-dns --max-unavailable 1` - create pod disruption budget
- `kubectl create poddisruptionbudget gb-pdb --selector run=gb-frontend --min-available 4` - create pod disruption budget
- `kubectl get nodes -l cloud.google.com/gke-nodepool=somePoolName -o=name` - get nodes (belonging to a node pool somePoolName)
- `kubectl get ingress frontend-ingress` - show ingress status
- `kubectl cluster-info` - show info about cluster (software versions, etc.)
- `kubectl get all` - show all (pods, deployments, etc.)
for node in $(kubectl get nodes -l cloud.google.com/gke-nodepool=default-pool -o=name); do
kubectl drain --force --ignore-daemonsets --grace-period=10 "$node";
done
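The yaml file for `kubectl create -f` / `kubectl apply -f` is a normal Kubernetes manifest; a minimal Deployment sketch (name, image, ports, and replica count are placeholders):

```
cat > file.yaml <<'EOF'
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: gcr.io/google-samples/hello-app:1.0
        ports:
        - containerPort: 8080
EOF
kubectl apply -f file.yaml
```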
More info and reference.
- `docker build -t gcr.io/blabla/node-app:v1 .` - build image (see the Dockerfile sketch below)
- `docker run -d -p 8080:8080 --name my-app gcr.io/blabla/node-app:v1` - run container from an image (-d = detach, run in background; -p = expose ports; --name = set container instance name)
- `docker ps` - list running containers
- `docker stop [CONTAINER ID]` - stop container
- `docker tag node-app:0.2 gcr.io/blabla/node-app:0.2` - tag container image
- `docker push gcr.io/blabla/node-app:v1` - push image to registry
- `docker pull gcr.io/blabla/node-app:0.2` - pull image from registry
- `docker images` - list images downloaded locally
- `docker ps [-a] [-q]` - list running containers (-a includes ones that have finished, -q prints only container ids)
- `docker stop my-app` - stop running container
- `docker rm my-app` - remove container
- `docker logs [-f] [container_id]` - see logs of the container (console logs, -f = follow/tail logs)
- `docker exec -it [container_id] bash` - interactive bash shell on container instance
- `docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' [container_id]` - see container instance metadata with formatting
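The image above would be built from a Dockerfile in the current directory; a minimal sketch assuming a Node.js app with `app.js` listening on port 8080 (the base image and file names are just examples):

```
cat > Dockerfile <<'EOF'
FROM node:16-slim
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 8080
CMD ["node", "app.js"]
EOF
docker build -t gcr.io/blabla/node-app:v1 .
```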
terraform {
required_providers {
google = {
source = "hashicorp/google"
}
}
}
provider "google" {
version = "3.5.0"
project = "unique_project_id"
region = "us-central1"
zone = "us-central1-c"
}
resource "google_compute_network" "vpc_network" {
name = "terraform-network"
}
resource "google_compute_instance" "vm_instance" {
name = "terraform-instance"
machine_type = "f1-micro"
boot_disk {
initialize_params {
image = "debian-cloud/debian-11"
}
}
network_interface {
network = google_compute_network.vpc_network.name
access_config {
}
}
}
resource "google_storage_bucket" "example_bucket" {
name = "UNIQUE-BUCKET-NAME"
location = "US"
website {
main_page_suffix = "index.html"
not_found_page = "404.html"
}
}
resource "google_compute_instance" "another_instance" {
depends_on = [google_storage_bucket.example_bucket]
name = "terraform-instance-2"
machine_type = "f1-micro"
boot_disk {
initialize_params {
image = "cos-cloud/cos-stable"
}
}
network_interface {
network = google_compute_network.vpc_network.self_link
access_config {
}
}
}
variables.tf
variable "project_id" {
type = string
default = "some_id"
description = "The ID of the project where this all will be created"
}
variable "region" {
type = string
default = "us-east1"
description = "The region where this all will be created"
}
variable "zone" {
type = string
default = "us-east1-c"
description = "The zone where this all will be created"
}
main.tf
terraform {
required_providers {
google = {
source = "hashicorp/google"
}
}
}
provider "google" {
version = "~> 3.45.0"
project = var.project_id
region = var.region
zone = var.zone
}
module "instances" {
source = "./modules/instances"
project_id = var.project_id
region = var.region
zone = var.zone
}
module "storage" {
source = "./modules/storage"
}
terraform {
backend "gcs" {
bucket = "tf-bucket-100858"
prefix = "terraform/state"
}
}
module "vpc" {
source = "terraform-google-modules/network/google"
version = "=3.4.0"
project_id = var.project_id
network_name = "tf-vpc-707793"
routing_mode = "GLOBAL"
subnets = [
{
subnet_name = "subnet-01"
subnet_ip = "10.10.10.0/24"
subnet_region = var.region
},
{
subnet_name = "subnet-02"
subnet_ip = "10.10.20.0/24"
subnet_region = var.region
}
]
}
resource "google_compute_firewall" "tf-firewall" {
name = "tf-firewall"
network = "projects/myproject-gcp-01-6c7c86c31cb4/global/networks/tf-vpc-707793"
allow {
protocol = "tcp"
ports = ["80"]
}
source_ranges = ["0.0.0.0/0"]
}
modules/storage/storage.tf
resource "google_storage_bucket" "tf-bucket-546693" {
name = "tf-bucket-546693"
location = "US"
force_destroy = true
uniform_bucket_level_access = true
}
modules/instances/instances.tf
resource "google_compute_instance" "tf-instance-1" {
name = "tf-instance-1"
project = var.project_id
zone = var.zone
machine_type = "n1-standard-2"
boot_disk {
initialize_params {
image = ""
}
}
network_interface {
network = "tf-vpc-707793"
subnetwork = "subnet-01"
access_config {
}
}
metadata_startup_script = <<-EOT
#!/bin/bash
EOT
allow_stopping_for_update = true
}
resource "google_compute_instance" "tf-instance-2" {
name = "tf-instance-2"
project = var.project_id
zone = var.zone
machine_type = "n1-standard-2"
boot_disk {
initialize_params {
image = ""
}
}
network_interface {
network = "tf-vpc-707793"
subnetwork = "subnet-02"
access_config {
}
}
metadata_startup_script = <<-EOT
#!/bin/bash
EOT
allow_stopping_for_update = true
}
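A typical workflow for applying configurations like the ones above (all standard Terraform CLI commands):

```
terraform init        # download providers/modules and configure the backend
terraform fmt         # format the configuration files
terraform validate    # check syntax and internal consistency
terraform plan        # preview the changes
terraform apply       # create/update the resources
terraform show        # inspect the current state
terraform destroy     # tear everything down
```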