diff --git a/examples/two-tier/README.md b/examples/two-tier/README.md
index 3920a15f4141..da7141e70536 100644
--- a/examples/two-tier/README.md
+++ b/examples/two-tier/README.md
@@ -10,9 +10,11 @@ getting your application onto the servers. However, you could do so either via
 management tool, or by pre-baking configured AMIs with
 [Packer](http://www.packer.io).
 
+Please replace every REPLACE_WITH_YOUR placeholder in the variables.tf file with your own values.
+
 This example will also create a new EC2 Key Pair in the specified AWS Region.
 The key name and path to the public key must be specified via the
-terraform command vars.
+terraform variables in variables.tf.
 
 After you run `terraform apply` on this configuration, it will
 automatically output the DNS address of the ELB. After your instance
@@ -25,12 +27,12 @@ https://www.terraform.io/docs/providers/aws/index.html
 Run with a command like this:
 
 ```
-terraform apply -var 'key_name={your_aws_key_name}' \
-   -var 'public_key_path={location_of_your_key_in_your_local_machine}'
+terraform init
+terraform apply
+terraform destroy
 ```
+## Requirements
 
-For example:
+An AWS account whose access key and secret key have administrative permissions in the chosen AWS region.
 
-```
-terraform apply -var 'key_name=terraform' -var 'public_key_path=/Users/jsmith/.ssh/terraform.pub'
-```
+An SSH key pair generated on your local machine, for example with `ssh-keygen`.
diff --git a/examples/two-tier/main.tf b/examples/two-tier/main.tf
index 74f4688822d1..84c1460079f5 100644
--- a/examples/two-tier/main.tf
+++ b/examples/two-tier/main.tf
@@ -1,6 +1,8 @@
 # Specify the provider and access details
 provider "aws" {
   region = "${var.aws_region}"
+  access_key = "${var.aws_access_key}"
+  secret_key = "${var.aws_secret_key}"
 }
 
 # Create a VPC to launch our instances into
@@ -21,16 +23,23 @@ resource "aws_route" "internet_access" {
 }
 
 # Create a subnet to launch our instances into
-resource "aws_subnet" "default" {
+resource "aws_subnet" "web" {
   vpc_id                  = "${aws_vpc.default.id}"
-  cidr_block              = "10.0.1.0/24"
+  cidr_block              = "10.0.0.0/24"
+  map_public_ip_on_launch = true
+}
+
+# Create a subnet to launch our DB instances into
+resource "aws_subnet" "db" {
+  vpc_id                  = "${aws_vpc.default.id}"
+  cidr_block              = "10.0.160.0/24"
   map_public_ip_on_launch = true
 }
 
 # A security group for the ELB so it is accessible via the web
 resource "aws_security_group" "elb" {
-  name        = "terraform_example_elb"
-  description = "Used in the terraform"
+  name        = "${var.deployment_name}_elb"
+  description = "Used in the ${var.deployment_name} deployment"
   vpc_id      = "${aws_vpc.default.id}"
 
   # HTTP access from anywhere
@@ -50,11 +59,11 @@ resource "aws_security_group" "elb" {
   }
 }
 
-# Our default security group to access
+# Our web security group to access
 # the instances over SSH and HTTP
-resource "aws_security_group" "default" {
-  name        = "terraform_example"
-  description = "Used in the terraform"
+resource "aws_security_group" "web" {
+  name        = "${var.deployment_name}_sg-web"
+  description = "Used in the ${var.deployment_name} deployment"
   vpc_id      = "${aws_vpc.default.id}"
 
   # SSH access from anywhere
@@ -82,10 +91,42 @@ resource "aws_security_group" "default" {
   }
 }
 
+# Our db security group to access
+# the instances over SSH and the PostgreSQL port
+resource "aws_security_group" "db" {
+  name        = "${var.deployment_name}_sg-db"
+  description = "Used in the ${var.deployment_name} deployment"
+  vpc_id      = "${aws_vpc.default.id}"
+
+  # SSH access from anywhere, for this example only. Consider restricting cidr_blocks to your own work/home address range.
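+  # For illustration only (203.0.113.0/24 is a reserved documentation range, not a real network):
+  #   cidr_blocks = ["203.0.113.10/32"]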
+  ingress {
+    from_port   = 22
+    to_port     = 22
+    protocol    = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  # Access to PostgreSQL from the VPC
+  ingress {
+    from_port   = 5432
+    to_port     = 5432
+    protocol    = "tcp"
+    cidr_blocks = ["10.0.0.0/16"]
+  }
+
+  # outbound internet access
+  egress {
+    from_port   = 0
+    to_port     = 0
+    protocol    = "-1"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+}
+
 resource "aws_elb" "web" {
-  name = "terraform-example-elb"
+  name = "${var.deployment_name}-elb"
 
-  subnets         = ["${aws_subnet.default.id}"]
+  subnets         = ["${aws_subnet.web.id}"]
   security_groups = ["${aws_security_group.elb.id}"]
   instances       = ["${aws_instance.web.id}"]
 
@@ -95,6 +136,16 @@ resource "aws_elb" "web" {
     lb_port           = 80
     lb_protocol       = "http"
   }
+
+  health_check = [
+    {
+      target              = "HTTP:80/"
+      interval            = 15
+      healthy_threshold   = 2
+      unhealthy_threshold = 2
+      timeout             = 5
+    },
+  ]
 }
 
 resource "aws_key_pair" "auth" {
@@ -108,8 +159,8 @@ resource "aws_instance" "web" {
   connection {
     # The default username for our AMI
     user = "ubuntu"
 
-    # The connection will use the local SSH agent for authentication.
+    private_key = "${file(var.private_key_path)}"
   }
 
   instance_type = "t2.micro"
@@ -122,21 +173,60 @@ resource "aws_instance" "web" {
   key_name = "${aws_key_pair.auth.id}"
 
   # Our Security group to allow HTTP and SSH access
-  vpc_security_group_ids = ["${aws_security_group.default.id}"]
+  vpc_security_group_ids = ["${aws_security_group.web.id}"]
 
   # We're going to launch into the same subnet as our ELB. In a production
   # environment it's more common to have a separate private subnet for
   # backend instances.
-  subnet_id = "${aws_subnet.default.id}"
+  subnet_id = "${aws_subnet.web.id}"
 
   # We run a remote provisioner on the instance after creating it.
   # In this case, we just install nginx and start it. By default,
   # this should be on port 80
+  # "sudo apt-get -y update",
   provisioner "remote-exec" {
     inline = [
-      "sudo apt-get -y update",
       "sudo apt-get -y install nginx",
       "sudo service nginx start",
     ]
   }
 }
+
+resource "aws_instance" "db" {
+  # The connection block tells our provisioner how to
+  # communicate with the resource (instance)
+  connection {
+    # The default username for our AMI
+    user = "ubuntu"
+    # The connection will use the private key below for authentication.
+    private_key = "${file(var.private_key_path)}"
+  }
+
+  instance_type = "t2.micro"
+
+  # Lookup the correct AMI based on the region
+  # we specified
+  ami = "${lookup(var.aws_amis, var.aws_region)}"
+
+  # The name of our SSH keypair we created above.
+  key_name = "${aws_key_pair.auth.id}"
+
+  # Our Security group to allow PostgreSQL and SSH access
+  vpc_security_group_ids = ["${aws_security_group.db.id}"]
+
+  # We're going to launch into the dedicated DB subnet. In a production
+  # environment it's more common to have a separate private subnet for
+  # backend instances.
+  subnet_id = "${aws_subnet.db.id}"
+
+  # We run a remote provisioner on the instance after creating it.
+  # In this case, we just install PostgreSQL and start it. By default,
+  # it listens on port 5432.
+  # "sudo apt-get -y update",
+  provisioner "remote-exec" {
+    inline = [
+      "sudo apt-get -y install postgresql",
+      "sudo service postgresql start",
+    ]
+  }
+}
diff --git a/examples/two-tier/outputs.tf b/examples/two-tier/outputs.tf
index 5977837f3330..bd7762c50724 100644
--- a/examples/two-tier/outputs.tf
+++ b/examples/two-tier/outputs.tf
@@ -1,3 +1,20 @@
-output "address" {
+output "elb_dns_name" {
   value = "${aws_elb.web.dns_name}"
 }
+
+output "web_public_ip" {
+  value = "${aws_instance.web.public_ip}"
+}
+
+output "web_private_ip" {
+  value = "${aws_instance.web.private_ip}"
+}
+
+output "db_public_ip" {
+  value = "${aws_instance.db.public_ip}"
+}
+
+output "db_private_ip" {
+  value = "${aws_instance.db.private_ip}"
+}
+
diff --git a/examples/two-tier/variables.tf b/examples/two-tier/variables.tf
index 60ec0d8348bc..77e0d79afb75 100644
--- a/examples/two-tier/variables.tf
+++ b/examples/two-tier/variables.tf
@@ -1,3 +1,18 @@
+variable "aws_access_key" {
+  description = "AWS access key."
+  default     = "REPLACE_WITH_YOUR"
+}
+
+variable "aws_secret_key" {
+  description = "AWS secret key."
+  default     = "REPLACE_WITH_YOUR"
+}
+
+variable "deployment_name" {
+  description = "Desired name of the deployment"
+  default     = "test02"
+}
+
 variable "public_key_path" {
   description = <