Update k8s installer for refactored contiv components (#317)
* Update k8s installer for refactored contiv components

Dropped the k8s 1.4 code, updated the scripts and YAML files for the
contiv services, made the k8s install testing branch-based, and changed
the k8s gating process.
Also updated etcd to v3.2.4. It still behaves as etcd2, because contiv
connects to it over the v2 API; the newer container is only needed
because it ships with sh.
Also ran shfmt over all bash scripts and bumped the contiv version to 1.2.0.

Signed-off-by: Wei Tie <wtie@cisco.com>
tiewei authored Dec 14, 2017
1 parent 6c8a0d9 commit 4aae3dc
Showing 33 changed files with 628 additions and 838 deletions.
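For context on the etcd note in the commit message, a quick smoke test of the newer container might look like the sketch below. This is hypothetical: the quay.io image tag, container name, and endpoint are assumptions, not part of this commit.

    # start a throwaway etcd v3.2.4 and probe it over the v2 API from inside the container
    docker run -d --name etcd-smoke quay.io/coreos/etcd:v3.2.4 \
        etcd --listen-client-urls http://0.0.0.0:2379 --advertise-client-urls http://0.0.0.0:2379
    # the exec itself proves sh is present; cluster-health is a v2-API etcdctl command
    docker exec etcd-smoke sh -c 'ETCDCTL_API=2 etcdctl --endpoints http://127.0.0.1:2379 cluster-health'
    docker rm -f etcd-smoke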
11 changes: 8 additions & 3 deletions Makefile
@@ -5,7 +5,7 @@ export CONTIV_INSTALLER_VERSION ?= $(BUILD_VERSION)
 # downloaded and built assets intended to go in installer by build.sh
 export CONTIV_ARTIFACT_STAGING := $(PWD)/artifact_staging
 # some assets are retrieved from GitHub, this is the default version to fetch
-export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.1.7
+export DEFAULT_DOWNLOAD_CONTIV_VERSION := 1.2.0
 export CONTIV_ACI_GW_VERSION ?= latest
 export NETPLUGIN_OWNER ?= contiv
 # setting NETPLUGIN_BRANCH compiles that commit on demand,
@@ -107,10 +107,15 @@ release-test-swarm-mode: build
 	make cluster-swarm-mode
 	make install-test-swarm-mode

+# create k8s release testing image (does not contain ansible)
+k8s-build: prepare-netplugin-images assemble-build
+
+prepare-netplugin-images:
+	@bash ./scripts/prepare_netplugin_images.sh
 # Create a build and test the release installation on a vagrant cluster
 # TODO: The vagrant part of this can be optimized by taking snapshots instead
 # of creating a new set of VMs for each case
-release-test-kubeadm: build
+release-test-kubeadm: k8s-build
 	# Test kubeadm (centos by default)
 	make cluster-kubeadm
 	make install-test-kubeadm
@@ -152,4 +157,4 @@ install-test-legacy-swarm:
 ci: release-test-kubeadm
 ci-old: release-test-swarm-mode release-test-kubeadm release-test-legacy-swarm

-.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy
+.PHONY: all build cluster cluster-destroy release-test-legacy-swarm release-test-swarm-mode release-test-kubeadm release-test-kubelegacy install-test-legacy-swarm install-test-swarm-mode install-test-kubeadm install-test-kube-legacy k8s-build prepare-netplugin-images
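The new target wiring can be exercised directly; a minimal sketch, assuming a configured build environment:

    make k8s-build              # prepare netplugin images and assemble the installer (no ansible inside)
    make release-test-kubeadm   # now depends on k8s-build rather than build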
4 changes: 2 additions & 2 deletions cluster/docker17/bootstrap_centos.sh
@@ -9,8 +9,8 @@ fi

 yum install -y yum-utils
 yum-config-manager \
-  --add-repo \
-  https://download.docker.com/linux/centos/docker-ce.repo
+    --add-repo \
+    https://download.docker.com/linux/centos/docker-ce.repo

 yum makecache fast
 yum -y install docker-ce
11 changes: 5 additions & 6 deletions cluster/docker17/centos_docker_install.sh
@@ -6,8 +6,8 @@
 set -euo pipefail

 if [ $EUID -ne 0 ]; then
-  echo "Please run this script as root user"
-  exit 1
+    echo "Please run this script as root user"
+    exit 1
 fi

 # Install pre-reqs
@@ -16,22 +16,21 @@ yum install -y yum-utils device-mapper-persistent-data lvm2

 yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

-
 # Install Docker
-# If you require a specific version, comment out the first line and uncomment 
+# If you require a specific version, comment out the first line and uncomment
 # the other one. Fill in the version you want.
 yum -y install docker-ce
 #sudo yum install docker-ce-<VERSION>

 # Post-install steps
-# add admin user to docker group 
+# add admin user to docker group
 usermod -aG docker $SUDO_USER

 # add /etc/docker/ if it doesn't exist
 mkdir -p /etc/docker

 # add (and create) daemon.json with entry for storage-device
-cat <<EOT >> /etc/docker/daemon.json
+cat <<EOT >>/etc/docker/daemon.json
 {
     "storage-driver": "devicemapper"
 }
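A quick way to confirm the heredoc took effect, as a hedged check (assumes the Docker daemon was restarted after the script ran):

    cat /etc/docker/daemon.json            # expect: { "storage-driver": "devicemapper" }
    docker info --format '{{.Driver}}'     # expect: devicemapper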
8 changes: 4 additions & 4 deletions cluster/docker17/master.sh
@@ -1,5 +1,5 @@
 docker swarm init --advertise-addr $1
-docker swarm join-token manager | \
-    grep -A 20 "docker swarm join" > $2/manager.sh
-docker swarm join-token worker | \
-    grep -A 20 "docker swarm join" > $2/worker.sh
+docker swarm join-token manager |
+    grep -A 20 "docker swarm join" >$2/manager.sh
+docker swarm join-token worker |
+    grep -A 20 "docker swarm join" >$2/worker.sh
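Behavior is unchanged by the reformat: the script still initializes a swarm manager and captures the join commands. A usage sketch, with a hypothetical advertise address and shared path:

    ./master.sh 192.168.2.10 /shared   # writes /shared/manager.sh and /shared/worker.sh
    bash /shared/worker.sh             # run on each worker node to join the swarm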
6 changes: 3 additions & 3 deletions cluster/k8s1.6/k8smaster.sh
@@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks
if [ "$#" -eq 4 ]; then
cp /etc/kubernetes/admin.conf /home/$4
chown $(id -u $4):$(id -g $4) /home/$4/admin.conf
echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc
fi
echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc
fi
6 changes: 3 additions & 3 deletions cluster/k8s1.8/k8smaster.sh
@@ -2,6 +2,6 @@ kubeadm init --token=$1 --apiserver-advertise-address=$2 --skip-preflight-checks
if [ "$#" -eq 4 ]; then
cp /etc/kubernetes/admin.conf /home/$4
chown $(id -u $4):$(id -g $4) /home/$4/admin.conf
echo "export KUBECONFIG=/home/$4/admin.conf" >> /home/$4/.$(basename $SHELL)rc
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.$(basename $SHELL)rc
fi
echo "export KUBECONFIG=/home/$4/admin.conf" >>/home/$4/.$(basename $SHELL)rc
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >>~/.$(basename $SHELL)rc
fi
3 changes: 2 additions & 1 deletion install/ansible/env.json
@@ -6,7 +6,8 @@
"etcd_peers_group": "netplugin-master",
"service_vip": "__NETMASTER_IP__",
"validate_certs": false,
"cluster_store": "__CLUSTER_STORE__",
"cluster_store_driver": "__CLUSTER_STORE_TYPE__",
"cluster_store_url": "__CLUSTER_STORE_URLS__",
"auth_proxy_image": "contiv/auth_proxy:__API_PROXY_VERSION__",
"docker_reset_container_state": __DOCKER_RESET_CONTAINER_STATE__,
"docker_reset_image_state": __DOCKER_RESET_IMAGE_STATE__,
Expand Down
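With the localhost defaults that install.sh falls back to (see its diff below), the placeholders end up rewritten roughly as follows; a sketch of the post-sed file, not part of the diff:

    grep cluster_store install/ansible/env.json
    #    "cluster_store_driver": "etcd",
    #    "cluster_store_url": "http://localhost:2379",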
44 changes: 37 additions & 7 deletions install/ansible/install.sh
@@ -37,7 +37,7 @@ error_ret() {
     exit 1
 }

-while getopts ":n:a:im:d:v:ps:" opt; do
+while getopts ":n:a:im:d:v:pe:c:s:" opt; do
     case $opt in
     n)
         netmaster=$OPTARG
@@ -67,8 +67,29 @@ while getopts ":n:a:im:d:v:ps:" opt; do
     p)
         contiv_v2plugin_install=true
         ;;
+    e)
+        # etcd endpoint option
+        cluster_store_type=etcd
+        cluster_store_urls=$OPTARG
+        install_etcd=false
+        ;;
+    c)
+        # consul endpoint option
+        cluster_store_type=consul
+        cluster_store_urls=$OPTARG
+        install_etcd=false
+        ;;
     s)
-        cluster_store=$OPTARG
+        # backward compatibility
+        echo "-s option has been deprecated, use -e or -c instead"
+        local cluster_store=$OPTARG
+        if [[ "$cluster_store" =~ ^etcd://.+ ]]; then
+            cluster_store_type=etcd
+            cluster_store_urls=$(echo $cluster_store | sed s/etcd/http/)
+        elif [[ "$cluster_store" =~ ^consul://.+ ]]; then
+            cluster_store_type=consul
+            cluster_store_urls=$(echo $cluster_store | sed s/consul/http/)
+        fi
+        install_etcd=false
         ;;
     :)
@@ -88,6 +109,15 @@ mkdir -p "$inventory"
 host_inventory="$inventory/contiv_hosts"
 node_info="$inventory/contiv_nodes"

+# TODO: use python to generate the inventory
+# This python-generated inventory contains
+# 1. groups and hosts
+# 2. ssh info for each host
+# 3. control interface for each host
+# 4. data interface for each host
+# 5. aci info
+# 6. fwd_mode (bridge/routing), net_mode (vlan/vxlan), contiv_network_mode (standalone/aci)
+# The sed calls below against env_file set the rest; the two steps should be combined into one
 ./install/genInventoryFile.py "$contiv_config" "$host_inventory" "$node_info" $contiv_network_mode $fwd_mode

 if [ "$netmaster" = "" ]; then
@@ -131,13 +161,15 @@ if [ "$service_vip" == "" ]; then
     service_vip=$netmaster
 fi

-if [ "$cluster_store" == "" ]; then
-    cluster_store="etcd://localhost:2379"
+if [ "$cluster_store" = "" ]; then
+    cluster_store_type="etcd"
+    cluster_store_urls="http://localhost:2379"
 fi

 # variables already replaced by build.sh will not pattern match
 sed -i.bak 's#__NETMASTER_IP__#'"$service_vip"'#g' "$env_file"
-sed -i.bak 's#__CLUSTER_STORE__#'"$cluster_store"'#g' "$env_file"
+sed -i.bak 's#__CLUSTER_STORE_TYPE__#'"$cluster_store_type"'#g' "$env_file"
+sed -i.bak 's#__CLUSTER_STORE_URLS__#'"$cluster_store_urls"'#g' "$env_file"
 sed -i.bak 's#__DOCKER_RESET_CONTAINER_STATE__#false#g' "$env_file"
 sed -i.bak 's#__DOCKER_RESET_IMAGE_STATE__#false#g' "$env_file"
 sed -i.bak 's#__ETCD_CLEANUP_STATE__#false#g' "$env_file"
@@ -205,8 +237,6 @@ if [ "$unreachable" = "" ] && [ "$failed" = "" ]; then
echo "Please export DOCKER_HOST=tcp://$netmaster:2375 in your shell before proceeding"
echo "Contiv UI is available at https://$netmaster:10000"
echo "Please use the first run wizard or configure the setup as follows:"
echo " Configure forwarding mode (optional, default is bridge)."
echo " netctl global set --fwd-mode routing"
echo " Configure ACI mode (optional)"
echo " netctl global set --fabric-mode aci --vlan-range <start>-<end>"
echo " Create a default network"
Expand Down
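Taken together, the option changes mean -e and -c accept plain endpoint URLs, while the deprecated -s form is still parsed and rewritten. Hedged invocation sketches; all addresses are hypothetical:

    ./install/ansible/install.sh -n 10.0.0.1 -e http://10.0.0.2:2379   # external etcd cluster store
    ./install/ansible/install.sh -n 10.0.0.1 -c http://10.0.0.2:8500   # external consul cluster store
    ./install/ansible/install.sh -n 10.0.0.1 -s etcd://10.0.0.2:2379   # deprecated; becomes type=etcd, urls=http://10.0.0.2:2379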
100 changes: 51 additions & 49 deletions install/ansible/install_swarm.sh
@@ -73,53 +73,53 @@ mkdir -p "$src_conf_path"
cluster_param=""
while getopts ":f:n:a:e:ipm:d:v:u:c:k:s:" opt; do
case $opt in
f)
cp "$OPTARG" "$host_contiv_config"
;;
n)
netmaster=$OPTARG
;;
a)
ans_opts="$OPTARG"
;;
e)
ans_key=$OPTARG
;;
u)
ans_user=$OPTARG
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
s)
cluster_param="-s $OPTARG"
;;

i)
install_scheduler="-i"
;;
p)
v2plugin_param="-p"
;;
c)
cp "$OPTARG" "$host_tls_cert"
;;
k)
cp "$OPTARG" "$host_tls_key"
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
f)
cp "$OPTARG" "$host_contiv_config"
;;
n)
netmaster=$OPTARG
;;
a)
ans_opts="$OPTARG"
;;
e)
ans_key=$OPTARG
;;
u)
ans_user=$OPTARG
;;
m)
contiv_network_mode=$OPTARG
;;
d)
fwd_mode=$OPTARG
;;
v)
aci_image=$OPTARG
;;
s)
cluster_param="-s $OPTARG"
;;

i)
install_scheduler="-i"
;;
p)
v2plugin_param="-p"
;;
c)
cp "$OPTARG" "$host_tls_cert"
;;
k)
cp "$OPTARG" "$host_tls_key"
;;
:)
echo "An argument required for $OPTARG was not passed"
usage
;;
?)
usage
;;
esac
done

@@ -148,7 +148,7 @@ fi
if [ "$ans_opts" == "" ]; then
ans_opts="--private-key $def_ans_key -u $ans_user"
else
ans_opts+=" --private-key $def_ans_key -u $ans_user"
ans_opts+=" --private-key $def_ans_key -u $ans_user"
fi

# Generate SSL certs for auth proxy
@@ -172,4 +172,6 @@ mounts[5]="$src_conf_path:$container_conf_path:Z"
 mounts[6]="-v"
 mounts[7]="$(pwd)/contiv_cache:/var/contiv_cache:Z"
 set -x
-docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode -d $fwd_mode $aci_param $cluster_param $v2plugin_param
+docker run --rm --net=host "${mounts[@]}" $image_name ./install/ansible/install.sh \
+    $netmaster_param -a "$ans_opts" $install_scheduler -m $contiv_network_mode \
+    -d $fwd_mode $aci_param $cluster_param $v2plugin_param
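Most of the churn in this file is the shfmt pass noted in the commit message, so re-running the formatter should be a no-op. A hedged invocation (the exact flags the project used are not shown in this diff):

    shfmt -l -w cluster/ install/   # -l lists files whose formatting would change, -w rewrites them in place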