diff --git a/bootstrap-master.sh b/bootstrap-master.sh
index 6da049a38..ade8ea727 100644
--- a/bootstrap-master.sh
+++ b/bootstrap-master.sh
@@ -6,11 +6,6 @@
 sudo apt-get --yes update
 sudo apt-get --yes upgrade
 sudo apt-get --yes install ansible git screen vim telnet tcpdump python-setuptools gcc python-dev python-pip libssl-dev libffi-dev software-properties-common
-# Kargo and custom inventory
-sudo git clone https://github.com/kubespray/kargo /root/kargo
-sudo git clone https://github.com/adidenko/vagrant-k8s /root/vagrant-k8s
-sudo cp -a /root/vagrant-k8s/kargo/inv /root/kargo/inv
-
 # Kargo-cli
 sudo git clone https://github.com/kubespray/kargo-cli.git /root/kargo-cli
 sudo sh -c 'cd /root/kargo-cli && python setup.py install'
diff --git a/deploy-k8s.kargo.sh b/deploy-k8s.kargo.sh
index e5a845dfd..59790251f 100644
--- a/deploy-k8s.kargo.sh
+++ b/deploy-k8s.kargo.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
 
-custom_opts='--ansible-opts="-e @kargo/custom.yaml"'
+custom_opts='--ansible-opts=\"-e @kargo/custom.yaml\"'
 
 nodes=""
 i=0
@@ -9,8 +9,15 @@ for nodeip in `cat /root/nodes` ; do
   nodes+=" node${i}[ansible_ssh_host=${nodeip},ip=${nodeip}]"
 done
 
-kargo prepare -y --nodes $nodes
-kargo deploy -y $custom_opts
+if [ -f kargo/inventory/inventory.cfg ] ; then
+  echo "kargo/inventory/inventory.cfg already exists, if you want to recreate it, please remove it and re-run this script"
+else
+  echo "Preparing inventory..."
+  kargo prepare -y --nodes $nodes
+fi
+
+echo "Running deployment..."
+kargo deploy -y $custom_opts
 
 deploy_res=$?
 if [ "$deploy_res" -eq "0" ]; then
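As a sanity check, the node-spec expansion in deploy-k8s.kargo.sh can be exercised on its own. A minimal sketch, assuming a placeholder file /tmp/nodes with sample IPs taken from the deleted static inventory; the `i` increment is also an assumption, since the hunk above only shows the `i=0` initialization and the `node${i}` usage:

#!/bin/bash
# Standalone sketch of the --nodes argument built by deploy-k8s.kargo.sh.
# /tmp/nodes and the two IPs are placeholders (the real script reads /root/nodes).
nodes_file="/tmp/nodes"
printf '10.210.1.12\n10.210.1.13\n' > "$nodes_file"

nodes=""
i=0
for nodeip in $(cat "$nodes_file") ; do
  i=$(( i + 1 ))                                              # assumed increment
  nodes+=" node${i}[ansible_ssh_host=${nodeip},ip=${nodeip}]"
done

# Print the command this expansion would feed to kargo:
echo "kargo prepare -y --nodes${nodes}"
# -> kargo prepare -y --nodes node1[ansible_ssh_host=10.210.1.12,ip=10.210.1.12] node2[ansible_ssh_host=10.210.1.13,ip=10.210.1.13]
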
diff --git a/kargo/inv/group_vars/all.yml b/kargo/inv/group_vars/all.yml
deleted file mode 100644
index cb0da8a1c..000000000
--- a/kargo/inv/group_vars/all.yml
+++ /dev/null
@@ -1,144 +0,0 @@
-# Directory where the binaries will be installed
-bin_dir: /usr/local/bin
-
-# Where the binaries will be downloaded.
-# Note: ensure that you've enough disk space (about 1G)
-local_release_dir: "/var/tmp/releases"
-
-# Uncomment this line for CoreOS only.
-# Directory where python binary is installed
-# ansible_python_interpreter: "/opt/bin/python"
-
-# This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
-kube_cert_group: kube-cert
-
-# Cluster Loglevel configuration
-kube_log_level: 2
-
-# Users to create for basic auth in Kubernetes API via HTTP
-kube_api_pwd: "changeme"
-kube_users:
-  kube:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-  root:
-    pass: "changeme"
-    role: admin
-
-# Kubernetes cluster name, also will be used as DNS domain
-cluster_name: cluster.local
-
-# For some environments, each node has a pubilcally accessible
-# address and an address it should bind services to. These are
-# really inventory level variables, but described here for consistency.
-#
-# When advertising access, the access_ip will be used, but will defer to
-# ip and then the default ansible ip when unspecified.
-#
-# When binding to restrict access, the ip variable will be used, but will
-# defer to the default ansible ip when unspecified.
-#
-# The ip variable is used for specific address binding, e.g. listen address
-# for etcd. This is use to help with environments like Vagrant or multi-nic
-# systems where one address should be preferred over another.
-# ip: 10.2.2.2
-#
-# The access_ip variable is used to define how other nodes should access
-# the node. This is used in flannel to allow other flannel nodes to see
-# this node for example. The access_ip is really useful AWS and Google
-# environments where the nodes are accessed remotely by the "public" ip,
-# but don't know about that address themselves.
-# access_ip: 1.1.1.1
-
-# Choose network plugin (calico, weave or flannel)
-kube_network_plugin: calico
-
-# Kubernetes internal network for services, unused block of space.
-kube_service_addresses: 10.233.0.0/18
-
-# internal network. When used, it will assign IP
-# addresses from this range to individual pods.
-# This network must be unused in your network infrastructure!
-kube_pods_subnet: 10.233.64.0/18
-
-# internal network total size (optional). This is the prefix of the
-# entire network. Must be unused in your environment.
-# kube_network_prefix: 18
-
-# internal network node size allocation (optional). This is the size allocated
-# to each node on your network. With these defaults you should have
-# room for 4096 nodes with 254 pods per node.
-kube_network_node_prefix: 24
-
-# With calico it is possible to distributed routes with border routers of the datacenter.
-peer_with_router: false
-# Warning : enabling router peering will disable calico's default behavior ('node mesh').
-# The subnets of each nodes will be distributed by the datacenter router
-
-# The port the API Server will be listening on.
-kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
-
-# Internal DNS configuration.
-# Kubernetes can create and mainatain its own DNS server to resolve service names
-# into appropriate IP addresses. It's highly advisable to run such DNS server,
-# as it greatly simplifies configuration of your applications - you can use
-# service names instead of magic environment variables.
-# You still must manually configure all your containers to use this DNS server,
-# Kubernetes won't do this for you (yet).
-
-# Upstream dns servers used by dnsmasq
-upstream_dns_servers:
-  - 8.8.8.8
-  - 8.8.4.4
-#
-# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
-dns_setup: true
-dns_domain: "{{ cluster_name }}"
-#
-# # Ip address of the kubernetes skydns service
-skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
-dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
-
-# There are some changes specific to the cloud providers
-# for instance we need to encapsulate packets with some network plugins
-# If set the possible values are either 'gce', 'aws' or 'openstack'
-# When openstack is used make sure to source in the openstack credentials
-# like you would do when using nova-client before starting the playbook.
-# cloud_provider:
-
-# For multi masters architecture:
-# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
-# This domain name will be inserted into the /etc/hosts file of all servers
-# configuration example with haproxy :
-# listen kubernetes-apiserver-https
-#   bind 10.99.0.21:8383
-#   option ssl-hello-chk
-#   mode tcp
-#   timeout client 3h
-#   timeout server 3h
-#   server master1 10.99.0.26:443
-#   server master2 10.99.0.27:443
-#   balance roundrobin
-# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
-
-## Set these proxy values in order to update docker daemon to use proxies
-# http_proxy: ""
-# https_proxy: ""
-# no_proxy: ""
-
-## A string of extra options to pass to the docker daemon.
-## This string should be exactly as you wish it to appear.
-## An obvious use case is allowing insecure-registry access
-## to self hosted registries like so:
-docker_options: "--insecure-registry={{ kube_service_addresses }}"
-
-# default packages to install within the cluster
-kpm_packages:
-  - name: kube-system/kubedns
-    namespace: kube-system
-    variables:
-      cluster_ip: "{{skydns_server}}"
-# - name: kube-system/grafana
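For reference, the Jinja2 ipaddr() chains in the deleted all.yml carve fixed offsets out of kube_service_addresses: the apiserver VIP, cluster DNS, and skydns addresses are the first three hosts of the service range. A hedged bash sketch of what they resolve to for the default 10.233.0.0/18; the string-splitting shortcut is only valid because the default range starts on a .0 boundary, whereas the Jinja2 filters handle the general case:

#!/bin/bash
# Sketch: addresses produced by ipaddr('net')|ipaddr(N)|ipaddr('address')
# for the default kube_service_addresses of 10.233.0.0/18.
net="10.233.0.0/18"
base="${net%%/*}"   # drop the prefix length -> 10.233.0.0
base="${base%.*}"   # drop the last octet    -> 10.233.0
echo "kube_apiserver_ip=${base}.1"   # offset 1 in the deleted all.yml
echo "dns_server=${base}.2"          # offset 2
echo "skydns_server=${base}.3"       # offset 3
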
diff --git a/kargo/inv/inventory.cfg b/kargo/inv/inventory.cfg
deleted file mode 100644
index 29e6c32e0..000000000
--- a/kargo/inv/inventory.cfg
+++ /dev/null
@@ -1,28 +0,0 @@
-node2 ansible_ssh_host=10.210.1.12
-node3 ansible_ssh_host=10.210.1.13
-node4 ansible_ssh_host=10.210.1.14
-node5 ansible_ssh_host=10.210.1.15
-node6 ansible_ssh_host=10.210.1.16
-node7 ansible_ssh_host=10.210.1.17
-
-[kube-master]
-node2
-node3
-node4
-
-[etcd]
-node2
-node3
-node4
-
-[kube-node]
-node2
-node3
-node4
-node5
-node6
-node7
-
-[k8s-cluster:children]
-kube-node
-kube-master
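With the static inventory deleted, the groups above now come from the kargo/inventory/inventory.cfg generated by "kargo prepare". A quick way to confirm the generated inventory reproduces the same topology is to list each group with ansible; a sketch assuming ansible is on PATH and the inventory has already been generated:

#!/bin/bash
# List the hosts ansible resolves for each group the deleted
# static inventory used to define.
inv="kargo/inventory/inventory.cfg"    # generated by "kargo prepare"
for group in kube-master etcd kube-node k8s-cluster ; do
  echo "--- ${group} ---"
  ansible -i "$inv" "$group" --list-hosts
done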