This repository has been archived by the owner on Jan 25, 2023. It is now read-only.

Commit 48e8fc5

Merge pull request #44 from hashicorp/tf12-upgrade: Tf12 upgrade

Etiene authored Jun 28, 2019
2 parents d0c3802 + 51eeb94, commit 48e8fc5
Showing 31 changed files with 1,214 additions and 631 deletions.
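
Most of the changes below are the mechanical Terraform 0.11 to 0.12 syntax migration: bare expressions replace interpolation-only quoting, template_file's vars block becomes a map argument (vars = { ... }), list wrapping like ["${var.x}"] becomes [var.x], and attribute access a.0.b becomes a[0].b. As a hedged orientation sketch (the resource and variable names here are illustrative, not taken from any one file in this PR), the core pattern is:

# Terraform 0.12 treats attribute values as first-class expressions, so the
# 0.11 habit of wrapping every reference in "${...}" is dropped. Interpolation
# syntax survives only where a string is genuinely being built.
variable "network_name" {
  type = string
}

resource "google_compute_subnetwork" "example" {
  name          = "${var.network_name}-subnet" # kept: real string concatenation
  network       = var.network_name             # 0.11 required "${var.network_name}"
  ip_cidr_range = "10.0.0.0/16"
}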
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -4,7 +4,7 @@ workspace_root: &workspace_root
 defaults: &defaults
   working_directory: *workspace_root
   docker:
-    - image: gruntwork/circle-ci-test-image-base:latest
+    - image: gruntwork/circle-ci-test-image-base:go1.11

 version: 2
 jobs:
3 changes: 2 additions & 1 deletion README.md
@@ -1,4 +1,5 @@
 [![Maintained by Gruntwork.io](https://img.shields.io/badge/maintained%20by-gruntwork.io-%235849a6.svg)](https://gruntwork.io/?ref=repo_gcp_vault)
+![Terraform Version](https://img.shields.io/badge/tf-%3E%3D0.12.0-blue.svg)
 # Vault for Google Cloud Platform (GCP)

 This repo contains a Terraform Module for how to deploy a [Vault](https://www.vaultproject.io/) cluster on
@@ -91,7 +92,7 @@ To deploy the Vault cluster:

 1. Deploy that Image across a Managed Instance Group using the Terraform [vault-cluster-module](https://github.com/hashicorp/terraform-google-vault/tree/master/modules/vault-cluster).

-1. TODO ACCESSING THE CLUSTER THROUGH SSH
+1. TODO ACCESSING THE CLUSTER THROUGH SSH

 1. Execute the [run-consul script](https://github.com/hashicorp/terraform-google-consul/tree/master/modules/run-consul)
    with the `--client` flag during boot on each Instance to have the Consul agent connect to the Consul server cluster.
115 changes: 58 additions & 57 deletions examples/vault-cluster-authentication-gce/main.tf
@@ -6,14 +6,14 @@
 # ---------------------------------------------------------------------------------------------------------------------

 provider "google" {
-  project = "${var.gcp_project_id}"
-  region  = "${var.gcp_region}"
+  project = var.gcp_project_id
+  region  = var.gcp_region
 }

-# Use Terraform 0.10.x so that we can take advantage of Terraform GCP functionality as a separate provider via
-# https://github.com/terraform-providers/terraform-provider-google
 terraform {
-  required_version = ">= 0.10.3"
+  # The modules used in this example have been updated with 0.12 syntax, which means the example is no longer
+  # compatible with any versions below 0.12.
+  required_version = ">= 0.12"
 }

 # ---------------------------------------------------------------------------------------------------------------------
@@ -25,21 +25,22 @@ terraform {
 resource "google_compute_subnetwork" "private_subnet_with_google_api_access" {
   name                     = "${var.vault_cluster_name}-private-subnet-with-google-api-access"
   private_ip_google_access = true
-  network                  = "${var.network_name}"
-  ip_cidr_range            = "${var.subnet_ip_cidr_range}"
+  network                  = var.network_name
+  ip_cidr_range            = var.subnet_ip_cidr_range
 }

 # ---------------------------------------------------------------------------------------------------------------------
 # DEPLOY A WEB CLIENT THAT AUTHENTICATES TO VAULT USING THE GCE METHOD AND FETCHES A SECRET
 # For more details on how the authentication works, check the startup scripts
 # ---------------------------------------------------------------------------------------------------------------------

-data "google_compute_zones" "available" {}
+data "google_compute_zones" "available" {
+}

 # Deploy web client that authenticates to vault
 resource "google_compute_instance" "web_client" {
-  name         = "${var.web_client_name}"
-  zone         = "${data.google_compute_zones.available.names[0]}"
+  name         = var.web_client_name
+  zone         = data.google_compute_zones.available.names[0]
   machine_type = "g1-small"
   tags         = ["web-client"]

@@ -49,18 +50,18 @@ resource "google_compute_instance" "web_client" {
   boot_disk {
     initialize_params {
-      image = "${var.vault_source_image}"
+      image = var.vault_source_image
     }
   }

   service_account {
     scopes = ["cloud-platform"]
   }

-  metadata_startup_script = "${data.template_file.startup_script_client.rendered}"
+  metadata_startup_script = data.template_file.startup_script_client.rendered

   network_interface {
-    subnetwork = "${google_compute_subnetwork.private_subnet_with_google_api_access.self_link}"
+    subnetwork = google_compute_subnetwork.private_subnet_with_google_api_access.self_link

     access_config {
       // Ephemeral IP - leaving this block empty will generate a new external IP and assign it to the machine
@@ -69,19 +70,19 @@ }
 }

 data "template_file" "startup_script_client" {
-  template = "${file("${path.module}/startup-script-client.sh")}"
+  template = file("${path.module}/startup-script-client.sh")

-  vars {
-    consul_cluster_tag_name = "${var.consul_server_cluster_name}"
+  vars = {
+    consul_cluster_tag_name = var.consul_server_cluster_name
     example_role_name       = "vault-test-role"
-    project_id              = "${var.gcp_project_id}"
+    project_id              = var.gcp_project_id
   }
 }

 # Allowing ingress of port 8080 on web client
 resource "google_compute_firewall" "default" {
   name        = "${var.vault_cluster_name}-test-firewall"
-  network     = "${var.network_name}"
+  network     = var.network_name
   target_tags = ["web-client"]

   allow {
@@ -100,26 +101,26 @@ module "vault_cluster" {
   # source = "git::git@github.com:hashicorp/terraform-google-vault.git//modules/vault-cluster?ref=v0.0.1"
   source = "../../modules/vault-cluster"

-  subnetwork_name = "${google_compute_subnetwork.private_subnet_with_google_api_access.name}"
+  subnetwork_name = google_compute_subnetwork.private_subnet_with_google_api_access.name

-  gcp_project_id = "${var.gcp_project_id}"
-  gcp_region     = "${var.gcp_region}"
+  gcp_project_id = var.gcp_project_id
+  gcp_region     = var.gcp_region

-  cluster_name     = "${var.vault_cluster_name}"
-  cluster_size     = "${var.vault_cluster_size}"
-  cluster_tag_name = "${var.vault_cluster_name}"
-  machine_type     = "${var.vault_cluster_machine_type}"
+  cluster_name     = var.vault_cluster_name
+  cluster_size     = var.vault_cluster_size
+  cluster_tag_name = var.vault_cluster_name
+  machine_type     = var.vault_cluster_machine_type

-  source_image   = "${var.vault_source_image}"
-  startup_script = "${data.template_file.startup_script_vault.rendered}"
+  source_image   = var.vault_source_image
+  startup_script = data.template_file.startup_script_vault.rendered

-  gcs_bucket_name          = "${var.vault_cluster_name}"
-  gcs_bucket_location      = "${var.gcs_bucket_location}"
-  gcs_bucket_storage_class = "${var.gcs_bucket_class}"
-  gcs_bucket_force_destroy = "${var.gcs_bucket_force_destroy}"
+  gcs_bucket_name          = var.vault_cluster_name
+  gcs_bucket_location      = var.gcs_bucket_location
+  gcs_bucket_storage_class = var.gcs_bucket_class
+  gcs_bucket_force_destroy = var.gcs_bucket_force_destroy

-  root_volume_disk_size_gb = "${var.root_volume_disk_size_gb}"
-  root_volume_disk_type    = "${var.root_volume_disk_type}"
+  root_volume_disk_size_gb = var.root_volume_disk_size_gb
+  root_volume_disk_type    = var.root_volume_disk_type

   # Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private.
   assign_public_ip_addresses = false
@@ -128,21 +129,21 @@ module "vault_cluster" {
   # We enable health checks from the Consul Server cluster to Vault.
   allowed_inbound_cidr_blocks_api = []

-  allowed_inbound_tags_api = ["${var.consul_server_cluster_name}"]
+  allowed_inbound_tags_api = [var.consul_server_cluster_name]
 }

 # Render the Startup Script that will run on each Vault Instance on boot. This script will configure and start Vault.
 data "template_file" "startup_script_vault" {
-  template = "${file("${path.module}/startup-script-vault.sh")}"
+  template = file("${path.module}/startup-script-vault.sh")

-  vars {
-    consul_cluster_tag_name = "${var.consul_server_cluster_name}"
-    vault_cluster_tag_name  = "${var.vault_cluster_name}"
-    enable_vault_ui         = "${var.enable_vault_ui ? "--enable-ui" : ""}"
+  vars = {
+    consul_cluster_tag_name   = var.consul_server_cluster_name
+    vault_cluster_tag_name    = var.vault_cluster_name
+    enable_vault_ui           = var.enable_vault_ui ? "--enable-ui" : ""
     example_role_name         = "vault-test-role"
-    example_secret          = "${var.example_secret}"
-    project_id              = "${var.gcp_project_id}"
-    vault_auth_allowed_zones = "${data.google_compute_zones.available.names[0]}"
+    example_secret            = var.example_secret
+    project_id                = var.gcp_project_id
+    vault_auth_allowed_zones  = data.google_compute_zones.available.names[0]
     vault_auth_allowed_labels = "example_label:example_value"
   }
 }
@@ -153,32 +154,32 @@ data "template_file" "startup_script_vault" {
 # ---------------------------------------------------------------------------------------------------------------------

 module "consul_cluster" {
-  source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.2.1"
+  source = "git::git@github.com:hashicorp/terraform-google-consul.git//modules/consul-cluster?ref=v0.4.0"

-  subnetwork_name = "${google_compute_subnetwork.private_subnet_with_google_api_access.name}"
-
-  gcp_region       = "${var.gcp_region}"
-  cluster_name     = "${var.consul_server_cluster_name}"
-  cluster_tag_name = "${var.consul_server_cluster_name}"
-  cluster_size     = "${var.consul_server_cluster_size}"
+  subnetwork_name  = google_compute_subnetwork.private_subnet_with_google_api_access.name
+  gcp_project_id   = var.gcp_project_id
+  gcp_region       = var.gcp_region
+  cluster_name     = var.consul_server_cluster_name
+  cluster_tag_name = var.consul_server_cluster_name
+  cluster_size     = var.consul_server_cluster_size

-  source_image = "${var.consul_server_source_image}"
-  machine_type = "${var.consul_server_machine_type}"
+  source_image = var.consul_server_source_image
+  machine_type = var.consul_server_machine_type

-  startup_script = "${data.template_file.startup_script_consul.rendered}"
+  startup_script = data.template_file.startup_script_consul.rendered

   # Note that the only way to reach private nodes via SSH is to first SSH into another node that is not private.
   assign_public_ip_addresses = false

-  allowed_inbound_tags_dns      = ["${var.vault_cluster_name}"]
-  allowed_inbound_tags_http_api = ["${var.vault_cluster_name}"]
+  allowed_inbound_tags_dns      = [var.vault_cluster_name]
+  allowed_inbound_tags_http_api = [var.vault_cluster_name]
 }

 # This Startup Script will run at boot to configure and start Consul on the Consul Server cluster nodes.
 data "template_file" "startup_script_consul" {
-  template = "${file("${path.module}/startup-script-consul.sh")}"
+  template = file("${path.module}/startup-script-consul.sh")

-  vars {
-    cluster_tag_name = "${var.consul_server_cluster_name}"
+  vars = {
+    cluster_tag_name = var.consul_server_cluster_name
   }
 }
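
A side note on the template_file data sources kept above: Terraform 0.12 also ships a built-in templatefile() function that renders the same script without the template provider. A minimal sketch, assuming the same file and variable names as the Consul startup script above:

locals {
  # Equivalent rendering with the 0.12 built-in; no template provider required.
  startup_script_consul = templatefile("${path.module}/startup-script-consul.sh", {
    cluster_tag_name = var.consul_server_cluster_name
  })
}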
35 changes: 18 additions & 17 deletions examples/vault-cluster-authentication-gce/outputs.tf
@@ -1,67 +1,68 @@
 output "gcp_project_id" {
-  value = "${var.gcp_project_id}"
+  value = var.gcp_project_id
 }

 output "gcp_region" {
-  value = "${var.gcp_region}"
+  value = var.gcp_region
 }

 output "vault_cluster_size" {
-  value = "${var.vault_cluster_size}"
+  value = var.vault_cluster_size
 }

 output "cluster_tag_name" {
-  value = "${module.vault_cluster.cluster_tag_name}"
+  value = module.vault_cluster.cluster_tag_name
 }

 output "web_client_name" {
-  value = "${var.web_client_name}"
+  value = var.web_client_name
 }

 output "web_client_public_ip" {
-  value = "${google_compute_instance.web_client.network_interface.0.access_config.0.nat_ip}"
+  value = google_compute_instance.web_client.network_interface[0].access_config[0].nat_ip
 }

 output "instance_group_id" {
-  value = "${module.vault_cluster.instance_group_id}"
+  value = module.vault_cluster.instance_group_id
 }

 output "instance_group_url" {
-  value = "${module.vault_cluster.instance_group_url}"
+  value = module.vault_cluster.instance_group_url
 }

 output "instance_template_url" {
-  value = "${module.vault_cluster.instance_template_url}"
+  value = module.vault_cluster.instance_template_url
 }

 output "firewall_rule_allow_intracluster_vault_id" {
-  value = "${module.vault_cluster.firewall_rule_allow_intracluster_vault_id}"
+  value = module.vault_cluster.firewall_rule_allow_intracluster_vault_id
 }

 output "firewall_rule_allow_intracluster_vault_url" {
-  value = "${module.vault_cluster.firewall_rule_allow_intracluster_vault_url}"
+  value = module.vault_cluster.firewall_rule_allow_intracluster_vault_url
 }

 output "firewall_rule_allow_inbound_api_id" {
-  value = "${module.vault_cluster.firewall_rule_allow_inbound_api_id}"
+  value = module.vault_cluster.firewall_rule_allow_inbound_api_id
 }

 output "firewall_rule_allow_inbound_api_url" {
-  value = "${module.vault_cluster.firewall_rule_allow_inbound_api_url}"
+  value = module.vault_cluster.firewall_rule_allow_inbound_api_url
 }

 output "firewall_rule_allow_inbound_health_check_id" {
-  value = "${module.vault_cluster.firewall_rule_allow_inbound_health_check_id}"
+  value = module.vault_cluster.firewall_rule_allow_inbound_health_check_id
 }

 output "firewall_rule_allow_inbound_health_check_url" {
-  value = "${module.vault_cluster.firewall_rule_allow_inbound_health_check_url}"
+  value = module.vault_cluster.firewall_rule_allow_inbound_health_check_url
 }

 output "bucket_name_id" {
-  value = "${module.vault_cluster.bucket_name_id}"
+  value = module.vault_cluster.bucket_name_id
 }

 output "bucket_name_url" {
-  value = "${module.vault_cluster.bucket_name_url}"
+  value = module.vault_cluster.bucket_name_url
 }
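
The web_client_public_ip change above also shows 0.12's index syntax, network_interface[0].access_config[0], replacing 0.11's attribute-style .0. access. If every NAT IP were wanted rather than just the first, a 0.12 for expression would cover it; a hypothetical variant that is not part of this PR:

output "web_client_nat_ips" {
  value = [
    for ac in google_compute_instance.web_client.network_interface[0].access_config :
    ac.nat_ip
  ]
}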
