X-net and Gemini-1b changes #55

Closed
wants to merge 19 commits into from
4 changes: 4 additions & 0 deletions README.md
@@ -71,3 +71,7 @@ terraform apply "current-plan.tfplan"

Terraform will apply the changes and generate/update the **.tfstate** file.
Be aware that state files can contain sensitive information. Do not expose them in a public repository.


Note: When creating a new workspace for a project, be sure to change plan execution from remote to local under Workspace -> Settings -> Execution Mode.
Choose local so that only the state is stored and tracked in Terraform Cloud, while execution is done locally instead of in Terraform Cloud.
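
For reference, the remote backend block that this local-execution workflow pairs with looks like the backend.tf files added in this PR; a minimal sketch is below. The Execution Mode switch itself lives in the Terraform Cloud workspace settings rather than in code, and the workspace name here is only a placeholder.

terraform {
  backend "remote" {
    hostname     = "app.terraform.io"
    organization = "subspace-sre"

    workspaces {
      # placeholder; each project points at its own workspace (e.g. gemini-1b, x-net)
      name = "example-workspace"
    }
  }
}

With local execution, terraform plan and apply run on your machine while only the resulting state is stored and versioned in the workspace.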
18 changes: 10 additions & 8 deletions resources/common.tf
@@ -7,7 +7,7 @@ terraform {

cloudflare = {
source = "cloudflare/cloudflare"
version = "~> 3.0"
version = "~> 3.18.0"
}
}
}
@@ -16,7 +16,7 @@ terraform {
variable "do_token" {}

# SSH agent identity to use to connect to the remote host
variable "ssh_identity" {}
variable "ssh_identity" { }

# Set DigitalOcean as provider
provider "digitalocean" {
@@ -25,7 +25,6 @@ provider "digitalocean" {

provider "cloudflare" {
email = var.cloudflare_email
account_id = var.cloudflare_account_id
api_token = var.cloudflare_api_token
}

@@ -34,17 +33,16 @@ variable "cloudflare_email" {
description = "clouflare email address"
}

variable "cloudflare_account_id" {
type = string
description = "cloudflare account id"
}

variable "cloudflare_api_token" {
type = string
description = "cloudflare api token"
}

# SSH team keys to be used on droplet access
data "digitalocean_ssh_key" "alexei2-key" {
name = "Alexei2 SSH Key"
}

data "digitalocean_ssh_key" "nazar-key" {
name = "Nazar SSH Key"
}
@@ -55,9 +53,13 @@ data "digitalocean_ssh_key" "ved-key" {
name = "Ved SSH Key"
}

# Sensitive variable. Contains the private key for connecting to specific DigitalOcean droplets
variable "alexey2_do_private_key" { }
Contributor Author:

why do we need to add this private key here?


I did it to connect the Terraform Cloud "null_resource" provisioners to the DO droplets.
The private key is saved as a Terraform Cloud secure/sensitive variable.

Contributor Author:

I'm not super clear on the architecture. Can you add more details so I can understand what you're thinking?

My initial thought process was that we want to use cloud state so that these private/sensitive variables never leave our workspaces. The added benefit is that everyone can use their own application keys, and it's much easier to audit changes while the state is kept updated, so anyone can pick up the work without sharing state explicitly.
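
For context, a minimal sketch of the pattern this thread is discussing, with the variable and connection arguments taken from this diff; the key value itself is supplied as a Sensitive variable in the Terraform Cloud workspace and is never committed:

# value comes from a Sensitive Terraform Cloud workspace variable
variable "alexey2_do_private_key" { }

resource "null_resource" "setup_nodes" {
  count = length(digitalocean_droplet.gemini-1b)

  # explicit private key instead of a forwarded SSH agent, since (per this thread)
  # provisioners running from Terraform Cloud cannot reach a local agent
  connection {
    host        = digitalocean_droplet.gemini-1b[count.index].ipv4_address
    user        = "root"
    type        = "ssh"
    agent       = false
    private_key = var.alexey2_do_private_key
    timeout     = "2m"
  }

  provisioner "remote-exec" {
    inline = ["echo connected"]
  }
}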


# add ssh keys as single var
locals {
ssh_keys = [
data.digitalocean_ssh_key.alexei2-key.id,
data.digitalocean_ssh_key.nazar-key.id,
data.digitalocean_ssh_key.serge-key.id,
data.digitalocean_ssh_key.ved-key.id
10 changes: 10 additions & 0 deletions resources/gemini-1b/backend.tf
@@ -0,0 +1,10 @@
terraform {
backend "remote" {
hostname = "app.terraform.io"
organization = "subspace-sre"

workspaces {
name = "gemini-1b"
}
}
}
4 changes: 1 addition & 3 deletions resources/gemini-1b/projects.tf
@@ -3,9 +3,7 @@ resource "digitalocean_project" "gemini-1b" {
description = "Subspace Gemini 1b"
purpose = "Testnet"
environment = "Production"
resources = flatten([
[for droplet in digitalocean_droplet.gemini-1b: droplet.urn],
])
resources = [for droplet in digitalocean_droplet.gemini-1b: droplet.urn]
}


18 changes: 9 additions & 9 deletions resources/gemini-1b/provisioner.tf
@@ -6,7 +6,7 @@ resource "null_resource" "node_keys" {

# generate node keys
provisioner "local-exec" {
command = "../../scripts/generate_node_keys.sh ${length(digitalocean_droplet.gemini-1b)} ./node_keys.txt"
command = "scripts/generate_node_keys.sh ${length(digitalocean_droplet.gemini-1b)} ./node_keys.txt"
interpreter = [ "/bin/bash", "-c" ]
environment = {
NODE_PUBLIC_IPS = join(",", digitalocean_droplet.gemini-1b.*.ipv4_address)
@@ -28,8 +28,8 @@ resource "null_resource" "setup_nodes" {
host = digitalocean_droplet.gemini-1b[count.index].ipv4_address
user = "root"
type = "ssh"
agent = true
agent_identity = var.ssh_identity
Contributor Author:

why do you want to remove the identity?


I haven't found another way to connect the Terraform Cloud "null_resource" provisioners to the droplets.
If you know of any way to do it, please share.

agent = false
private_key = var.alexey2_do_private_key
timeout = "2m"
}

@@ -42,7 +42,7 @@ resource "null_resource" "setup_nodes" {

# copy install file
provisioner "file" {
source = "../../scripts/install_docker.sh"
source = "scripts/install_docker.sh"
destination = "/subspace/install_docker.sh"
}

@@ -59,7 +59,7 @@ resource "null_resource" "setup_nodes" {
# deployment version
# increment this to restart node with any changes to env and compose files
locals {
deployment_version = 3
deployment_version = 9
}
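
The start_nodes resource below consumes this counter roughly as sketched here (the triggers block is reconstructed from the x-net executor resource later in this diff, since that part of the gemini-1b resource is collapsed): changing deployment_version changes the trigger value, so Terraform recreates the null_resource and reruns its provisioners on every droplet.

resource "null_resource" "start_nodes" {
  count = length(digitalocean_droplet.gemini-1b)

  # any change to deployment_version forces this resource to be replaced,
  # which reruns the file and remote-exec provisioners below
  triggers = {
    deployment_version = local.deployment_version
  }

  # ... connection and provisioner blocks as shown in the rest of this file ...
}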

resource "null_resource" "start_nodes" {
@@ -76,8 +76,8 @@ resource "null_resource" "start_nodes" {
host = digitalocean_droplet.gemini-1b[count.index].ipv4_address
user = "root"
type = "ssh"
agent = true
agent_identity = var.ssh_identity
agent = false
private_key = var.alexey2_do_private_key
timeout = "2m"
}

@@ -89,15 +89,15 @@ resource "null_resource" "start_nodes" {

# copy compose file
provisioner "file" {
source = "../../services/gemini-1/install_compose_file.sh"
source = "scripts/install_compose_file.sh"
destination = "/subspace/install_compose_file.sh"
}

# start docker containers
provisioner "remote-exec" {
inline = [
"docker compose -f /subspace/docker-compose.yml down",
"echo NODE_SNAPSHOT_TAG=${var.node-snapshot-tag} >> /subspace/.env",
"echo NODE_SNAPSHOT_TAG=${var.node-snapshot-tag} > /subspace/.env",
"echo NODE_ID=${count.index} >> /subspace/.env",
"echo NODE_KEY=$(sed -nr 's/NODE_${count.index}_KEY=//p' /subspace/node_keys.txt) >> /subspace/.env",
"sudo chmod +x /subspace/install_compose_file.sh",
24 changes: 24 additions & 0 deletions resources/gemini-1b/scripts/generate_node_keys.sh
@@ -0,0 +1,24 @@
#!/bin/bash

docker pull subspacelabs/subspace-node:latest
node_count=${1}
output_file=${2}
if [ -s "${output_file}" ]; then
echo "Node keys exists..."
exit 0
fi
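# split the comma-separated NODE_PUBLIC_IPS environment variable into an array of droplet IPs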
ips=$(echo "$NODE_PUBLIC_IPS" | awk -F, '{for(i=1;i<=NF;i++) print $i}')
ips=( ${ips} )
echo -n > "${output_file}"
echo "Generating node keys..."
for (( i = 0; i < node_count; i++ )); do
data="$(docker run --rm subspacelabs/subspace-node key generate-node-key 2>&1)"
peer_id=$(echo "$data" | sed '2q;d')
node_key=$(echo "$data" | sed '3q;d')
{
echo "NODE_${i}_PEER_ID=${peer_id}"
echo "NODE_${i}_KEY=${node_key}"
echo "NODE_${i}_MULTI_ADDR=/ip4/${ips[${i}]}/tcp/30333/p2p/${peer_id}"
} >> "${output_file}"
done
echo "Done."
@@ -29,7 +29,7 @@ services:
ports:
- "30333:30333"
labels:
caddy: rpc-\${NODE_ID}.gemini-1a.subspace.network
caddy: rpc-\${NODE_ID}.gemini-1b.subspace.network
caddy.handle_path_0: /http
caddy.handle_path_0.reverse_proxy: "{{upstreams 9933}}"
caddy.handle_path_1: /ws
@@ -45,9 +45,9 @@ services:
"--rpc-cors", "all",
"--rpc-external",
"--ws-external",
"--in-peers", "1000",
"--out-peers", "500",
"--in-peers-light", "1000",
"--in-peers", "500",
"--out-peers", "250",
"--in-peers-light", "500",
"--ws-max-connections", "10000",
EOF

25 changes: 25 additions & 0 deletions resources/gemini-1b/scripts/install_docker.sh
@@ -0,0 +1,25 @@
#!/bin/bash

# updates
export DEBIAN_FRONTEND=noninteractive
apt update -y
apt dist-upgrade -y
apt install -y curl jq

# install docker
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"
apt update -y
apt-cache policy docker-ce # check that docker-ce will be installed from the Docker repo instead of the Ubuntu repo
apt install -y docker-ce

# install docker-compose
curl -s -L "https://github.com/docker/compose/releases/download/$(curl -s -L https://api.github.com/repos/docker/compose/releases/latest | jq -r '.name')/docker-compose-$(uname -s)-$(uname -m)" -o /usr/libexec/docker/cli-plugins/docker-compose
chmod +x /usr/libexec/docker/cli-plugins/docker-compose

# set max socket connections
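# rewrite an existing net.core.somaxconn line in place if present; otherwise append one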
if ! (grep -iq "net.core.somaxconn" /etc/sysctl.conf && sed -i 's/.*net.core.somaxconn.*/net.core.somaxconn=65535/' /etc/sysctl.conf); then
echo "net.core.somaxconn=65535" >> /etc/sysctl.conf
fi

sysctl -p /etc/sysctl.conf
20 changes: 19 additions & 1 deletion resources/gemini-1b/variables.tf
@@ -19,5 +19,23 @@ variable "droplets-per-region" {
variable "node-snapshot-tag" {
description = "Node snapshot tag"
type = string
default = "gemini-1b-2022-june-05"
default = "gemini-1b-2022-jun-18"
}

variable "extra-droplets" {
description = "Extra droplets"
type = number
default = 20
}

variable "extra-droplets-us-per-region" {
description = "Extra droplets in US"
type = number
default = 5
}

variable "extra-droplet-regions-us" {
description = "Droplet regions"
type = list(string)
default = ["nyc1", "sfo3"]
}
10 changes: 10 additions & 0 deletions resources/x-net/backend.tf
@@ -0,0 +1,10 @@
terraform {
backend "remote" {
hostname = "app.terraform.io"
organization = "subspace-sre"

workspaces {
name = "x-net"
}
}
}
1 change: 1 addition & 0 deletions resources/x-net/common.tf
88 changes: 88 additions & 0 deletions resources/x-net/node-executor.tf
@@ -0,0 +1,88 @@
resource "digitalocean_droplet" "x-net-executor" {
image = "ubuntu-20-04-x64"
name = "x-net-executor"
region = var.droplet-region
size = var.droplet-size
ssh_keys = local.ssh_keys
}

resource "digitalocean_firewall" "x-net-executor-firewall" {
name = "x-net-executor-firewall"

droplet_ids = [digitalocean_droplet.x-net-executor.id]

inbound_rule {
protocol = "tcp"
port_range = "22"
source_addresses = ["0.0.0.0/0"]
}

inbound_rule {
protocol = "tcp"
port_range = "30333"
source_addresses = ["0.0.0.0/0"]
}

outbound_rule {
protocol = "tcp"
port_range = "all"
destination_addresses = ["0.0.0.0/0"]
}

outbound_rule {
protocol = "udp"
port_range = "all"
destination_addresses = ["0.0.0.0/0"]
}
}

locals {
executor_deployment_version = 5
}

resource "null_resource" "start_executor_node" {
depends_on = [null_resource.setup_nodes]

# trigger on node deployment version change
triggers = {
deployment_version = local.executor_deployment_version
}

connection {
host = digitalocean_droplet.x-net-executor.ipv4_address
user = "root"
type = "ssh"
agent = true
agent_identity = var.ssh_identity
timeout = "2m"
}

# copy node keys file
provisioner "file" {
source = "./node_keys.txt"
destination = "/subspace/node_keys.txt"
}

# copy compose file
provisioner "file" {
source = "../../services/x-net/setup_executor_compose.sh"
destination = "/subspace/setup_executor_compose.sh"
}

# start docker containers
provisioner "remote-exec" {
inline = [
"docker compose -f /subspace/docker-compose.yml down",
"mkdir -p /subspace/data/node",
"mkdir -p /subspace/data/executor",
"mkdir -p /subspace/data/executor/chains/subspace_x_net_1a_execution/keystore/",
"echo \"$(sed -nr 's/KEYSTORE_FILE_DATA=//p' /subspace/node_keys.txt)\" > /subspace/data/executor/chains/subspace_x_net_1a_execution/keystore/$(sed -nr 's/KEYSTORE_FILE_NAME=//p' /subspace/node_keys.txt)",
"chown -R nobody:nogroup /subspace/data",
"chown -R nobody:nogroup /subspace/data/*",
"echo NODE_KEY=$(sed -nr 's/NODE_2_KEY=//p' /subspace/node_keys.txt) > /subspace/.env",
"sudo chmod +x /subspace/setup_executor_compose.sh",
"sudo /subspace/setup_executor_compose.sh 3 2",
"docker compose -f /subspace/docker-compose.yml up -d",
]
}
}