Add hidden flag/var for supervisor/apiserver listen config
Add flags for the supervisor and apiserver ports and bind address so that we can add an e2e test covering the supervisor and apiserver on separate ports, as used by rke2.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
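For context, the split listen configuration exercised by this test is set through the k3s config file. A minimal sketch, using the same keys and values as the Vagrantfile below (supervisor on 9345, apiserver on 6443, matching the rke2 layout):

    supervisor-port: 9345
    apiserver-port: 6443
    apiserver-bind-address: 0.0.0.0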
Showing 5 changed files with 484 additions and 3 deletions.
@@ -0,0 +1,183 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
  ["server-0", "server-1", "server-2", "agent-0", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
  ['bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04', 'bento/ubuntu-24.04'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
HARDENED = (ENV['E2E_HARDENED'] || "")
REGISTRY = (ENV['E2E_REGISTRY'] || "")
RANCHER = (ENV['E2E_RANCHER'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
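# A minimal sketch of the allow-list entry for the range above (assumed format,
# not part of this commit):
#   * 10.10.10.0/24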
install_type = ""
hardened_arg = ""

# Provision a single VM: configure networking, install k3s with the appropriate
# role (primary server, secondary server, or agent), and wire up the optional
# registry, coverage, and Rancher steps.
def provision(vm, role, role_num, node_num)
  vm.box = NODE_BOXES[node_num]
  vm.hostname = role
  # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
  node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
  vm.network "private_network", ip: node_ip, netmask: "255.255.255.0"

  scripts_location = Dir.exist?("./scripts") ? "./scripts" : "../scripts"
  vagrant_defaults = File.exist?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
  load vagrant_defaults

  defaultOSConfigure(vm)
  install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)

  vm.provision "shell", inline: "ping -c 2 k3s.io"

  db_type = getDBType(role, role_num, vm)
  hardened_arg = getHardenedArg(vm, HARDENED, scripts_location)

  if !REGISTRY.empty?
    vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
  end
  addCoverageDir(vm, role, GOCOVER)

  if role.include?("server") && role_num == 0
    vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server "
      k3s.config = <<~YAML
        token: vagrant
        node-external-ip: #{NETWORK_PREFIX}.100
        flannel-iface: eth1
        disable: traefik
        supervisor-port: 9345
        apiserver-port: 6443
        apiserver-bind-address: 0.0.0.0
        node-taint: node-role.kubernetes.io/control-plane:NoSchedule
        #{db_type}
        #{hardened_arg}
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end

  elsif role.include?("server") && role_num != 0
    vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "server"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:9345"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
        disable: traefik
        supervisor-port: 9345
        apiserver-port: 6443
        apiserver-bind-address: 0.0.0.0
        node-taint: node-role.kubernetes.io/control-plane:NoSchedule
        #{db_type}
        #{hardened_arg}
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end

  if role.include?("agent")
    vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s|
      k3s.args = "agent"
      k3s.config = <<~YAML
        server: "https://#{NETWORK_PREFIX}.100:9345"
        token: vagrant
        node-external-ip: #{node_ip}
        flannel-iface: eth1
        #{db_type}
      YAML
      k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
      k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
    end
  end
  if vm.box.to_s.include?("microos")
    vm.provision 'k3s-reload', type: 'reload', run: 'once'
    if !EXTERNAL_DB.empty?
      vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
    end
  end
  # This step does not run by default and is designed to be called by higher level tools
  if !RANCHER.empty?
    blank_node = role.include?("agent")
    vm.provision "Install Rancher", type: "shell", run: "never", path: scripts_location + "/rancher.sh", args: [ "#{NETWORK_PREFIX}.100", blank_node.to_s ]
  end
end

# Returns the datastore config line (datastore-endpoint or cluster-init) for the
# requested EXTERNAL_DB backend, starting the database container on the first server
# when needed.
def getDBType(role, role_num, vm)

  if EXTERNAL_DB == "mariadb"
    if role.include?("server") && role_num == 0
      dockerInstall(vm)
      vm.provision "Start mariaDB", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MARIADB_ROOT_PASSWORD=e2e mariadb:11"
      vm.provision "shell", inline: "echo \"Wait for mariaDB to startup\"; sleep 10"
      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
    elsif role.include?("server") && role_num != 0
      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
    end

  elsif EXTERNAL_DB == "mysql"
    if role.include?("server") && role_num == 0
      dockerInstall(vm)
      vm.provision "Start mysql", inline: "docker run -d -p 3306:3306 --name #{EXTERNAL_DB} -e MYSQL_ROOT_PASSWORD=e2e mysql:5.7"
      vm.provision "shell", inline: "echo \"Wait for mysql to startup\"; sleep 10"
      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
    elsif role.include?("server") && role_num != 0
      return "datastore-endpoint: 'mysql://root:e2e@tcp(#{NETWORK_PREFIX}.100:3306)/k3s'"
    end

  elsif EXTERNAL_DB == "postgres"
    if role.include?("server") && role_num == 0
      dockerInstall(vm)
      vm.provision "Start postgres", type: "shell", inline: "docker run -d -p 5432:5432 --name postgres -e POSTGRES_PASSWORD=e2e postgres:14-alpine"
      vm.provision "shell", inline: "echo \"Wait for postgres to startup\"; sleep 10"
      return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/k3s?sslmode=disable'"
    elsif role.include?("server") && role_num != 0
      return "datastore-endpoint: 'postgres://postgres:e2e@#{NETWORK_PREFIX}.100:5432/k3s?sslmode=disable'"
    end

  elsif ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" )
    if role.include?("server") && role_num == 0
      return "cluster-init: true"
    end
  elsif ( EXTERNAL_DB == "none" )
    # Use internal sqlite, only valid for single node clusters
  else
    puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
    abort
  end
  return ""
end

Vagrant.configure("2") do |config|
  config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-scp"]
  # Default provider is libvirt, virtualbox is only provided as a backup
  config.vm.provider "libvirt" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
    # We replicate the default prefix, but add a timestamp to enable parallel runs and cleanup of old VMs
    v.default_prefix = File.basename(Dir.getwd) + "_" + Time.now.to_i.to_s + "_"
  end
  config.vm.provider "virtualbox" do |v|
    v.cpus = NODE_CPUS
    v.memory = NODE_MEMORY
  end

  if NODE_ROLES.kind_of?(String)
    NODE_ROLES = NODE_ROLES.split(" ", -1)
  end
  if NODE_BOXES.kind_of?(String)
    NODE_BOXES = NODE_BOXES.split(" ", -1)
  end

  NODE_ROLES.each_with_index do |role, i|
    role_num = role.split("-", -1).pop.to_i
    config.vm.define role do |node|
      provision(node.vm, role, role_num, i)
    end
  end
end
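
For reference, one way to bring this environment up locally might look like the following sketch; the E2E_* variables are the ones read at the top of this Vagrantfile, and the exact invocation used by CI may differ:

    E2E_NODE_ROLES="server-0 server-1 server-2 agent-0 agent-1" E2E_EXTERNAL_DB="etcd" vagrant up --provider=libvirt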