Adds the ability to have hosts with no floating IPs on terraform/openstack (+8 squashed commits)

Squashed commits:
[f9355ea] Swap order in which we reload docker/socket
[2ca6819] Reload docker.socket after installing flannel on coreos

Workaround for #569
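
On CoreOS the resulting handler order corresponds roughly to this manual sequence (a sketch; the actual change is in the Docker handlers diff below, and `docker.socket` is only restarted when `ansible_os_family == 'CoreOS'`):

```
# Reload unit definitions first, then the socket, then the daemon.
sudo systemctl daemon-reload
sudo systemctl restart docker.socket   # CoreOS only
sudo systemctl restart docker
```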
[9f976e5] Vagrantfile: setup proxy inside virtual machines

In corporate networks, it is good to pre-configure proxy variables.
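
A sketch of the intended workflow (assuming the `vagrant-proxyconf` plugin, which the Vagrantfile checks for via `Vagrant.has_plugin?`; the proxy address is a placeholder):

```
vagrant plugin install vagrant-proxyconf

# The Vagrantfile forwards these into every VM and extends no_proxy
# with each instance's subnet address.
export HTTP_PROXY="http://proxy.example.com:3128"
export HTTPS_PROXY="http://proxy.example.com:3128"
export NO_PROXY="127.0.0.1,localhost"

vagrant up
```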
[9d7142f] Vagrantfile: use Ubuntu 16.04 LTS

Use a recent, supported version of Ubuntu for the local development setup
with Vagrant.
[50f77cc] Add CI test layouts

* Drop Wily from test matrix
* Replace the Wily cases dropped with extra cases to test separate
  roles deployment

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
[03e162b] Update OWNERS
[c7b00ca] Use tar+register instead of copy/slurp for distributing tokens and certs

Related bug: https://github.com/ansible/ansible/issues/15405

Uses tar and register because the synchronize module cannot sudo correctly
on the remote side and the copy module is too slow.

This patch dramatically cuts down the number of tasks to process
for cert synchronization.
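
The round-trip behind these tasks looks roughly like this (a sketch; the cert directory and the `$CERT_DATA` variable, which stands in for the registered stdout, are placeholders; the real tasks use `{{ kube_cert_dir }}` and `delegate_to`):

```
# First master: pack all certs into a single base64 string that
# Ansible captures with `register`.
tar cfz - -C /etc/kubernetes/certs ca.pem node.pem node-key.pem | base64 --wrap=0

# Every other host: decode and unpack in one task, instead of one
# copy task per certificate.
echo "$CERT_DATA" | base64 -d | tar xz -C /etc/kubernetes/certs
```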
[2778ac6] Add new var skip_dnsmasq_k8s

If skip_dnsmasq is set, the dnsmasq k8s pod will still not be set up.
This enables setting up the resolvconf section independently, before
kubelet is up.
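
A sketch of the intended usage, assuming the usual `cluster.yml` entry point and a placeholder inventory path:

```
# Keep the resolvconf setup but skip only the dnsmasq k8s pod:
ansible-playbook -i inventory/inventory.cfg cluster.yml -e skip_dnsmasq_k8s=true

# Setting skip_dnsmasq=true still skips both, since skip_dnsmasq_k8s
# defaults to "{{ skip_dnsmasq }}".
```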
Authored by Matthew Mosesohn 2016-10-26 17:56:15 +03:00, committed by Pablo Moreno
parent a3f892c76c
commit f106bf5bc4
16 changed files with 198 additions and 70 deletions

View file

@@ -11,80 +11,92 @@ env:
CONTAINER_ENGINE=docker
PRIVATE_KEY=$GCE_PRIVATE_KEY
ANSIBLE_KEEP_REMOTE_FILES=1
CLUSTER_MODE=default
matrix:
# Debian Jessie
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
# Centos 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
# Redhat 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
# Ubuntu 16.04
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
# Ubuntu 15.10
# Extra cases for separated roles
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-central1-a
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-east1-d
CLUSTER_MODE=separate
before_install:
@@ -92,7 +104,8 @@ before_install:
- pip install --user boto -U
- pip install --user ansible
- pip install --user netaddr
- pip install --user apache-libcloud
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
- pip install --user apache-libcloud==0.20.1
cache:
- directories:
@@ -114,7 +127,8 @@ before_script:
script:
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}

OWNERS
View file

@@ -4,3 +4,6 @@
owners:
- Smana
- ant31
- bogdando
- mattymo
- rsmitty

Vagrantfile
View file

@@ -16,7 +16,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$box = "bento/ubuntu-14.04"
$box = "bento/ubuntu-16.04"
host_vars = {}
@@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
end
end
if Vagrant.has_plugin?("vagrant-proxyconf")
$no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
(1..$num_instances).each do |i|
$no_proxy += ",#{$subnet}.#{i+100}"
end
end
Vagrant.configure("2") do |config|
# always use Vagrant's insecure key
config.ssh.insert_key = false
@@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
config.vm.hostname = vm_name
if Vagrant.has_plugin?("vagrant-proxyconf")
config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
config.proxy.no_proxy = $no_proxy
end
if $expose_docker_tcp
config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
end

View file

@@ -5,14 +5,13 @@ Openstack.
## Status
This will install a Kubernetes cluster on an OpenStack Cloud. It is tested on an
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and
should work on most modern installs of OpenStack that support the basic
This will install a Kubernetes cluster on an OpenStack Cloud. It has been tested on an
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
services.
There are some assumptions made to try and ensure it will work on your OpenStack cluster; a quick way to sanity-check them is sketched after this list.
* floating-ips are used for access
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least one floating IP, which we suggest using on a master.
* you already have a suitable OS image in glance
* you already have both an internal network and a floating-ip pool created
* you have security-groups enabled
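
A quick way to sanity-check these assumptions before running Terraform (a sketch, assuming the `openstack` CLI is installed and your credentials are sourced):

```
openstack image list            # the glance image you plan to use
openstack network list          # your internal network and floating-ip pool
openstack security group list   # confirms security groups are available
```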
@@ -24,16 +23,14 @@ There are some assumptions made to try and ensure it will work on your openstack
## Terraform
Terraform will be used to provision all of the OpenStack resources required to
run Docker Swarm. It is also used to deploy and provision the software
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
requirements.
### Prep
#### OpenStack
Ensure your OpenStack credentials are loaded in environment variables. This is
how I do it:
Ensure your OpenStack credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
```
$ source ~/.stackrc
@@ -46,7 +43,7 @@ differences between OpenStack installs the Terraform does not attempt to create
these for you.
By default Terraform will expect that your networks are called `internal` and
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`.
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
A full list of variables you can change can be found at [variables.tf](variables.tf).
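
For example, either of the following should work (a sketch; the network and pool names shown are placeholders for your own):

```
# Through environment variables, using Terraform's TF_VAR_ convention:
export TF_VAR_network_name="my-internal-net"
export TF_VAR_floatingip_pool="my-floating-pool"

# Or through a variables file, passed later with -var-file:
cat > my-terraform-vars.tfvars <<EOF
network_name = "my-internal-net"
floatingip_pool = "my-floating-pool"
EOF
```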
@@ -76,8 +73,21 @@ $ echo Setting up Terraform creds && \
export TF_VAR_auth_url=${OS_AUTH_URL}
```
If you want to provision master or node VMs that don't use floating IPs, write a `my-terraform-vars.tfvars` file, for example:
```
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "1"
number_of_k8s_nodes = "0"
```
This will provision one master VM using a floating IP, two additional masters without floating IPs (these will only have private IPs inside your tenancy), and one node, again without a floating IP.
# Provision a Kubernetes Cluster on OpenStack
If you are not using a tfvars file for your setup, execute:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
openstack_compute_secgroup_v2.k8s_master: Creating...
@@ -96,6 +106,13 @@ use the `terraform show` command.
State path: contrib/terraform/openstack/terraform.tfstate
```
Alternatively, if you wrote your Terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
```
If you choose to add masters or nodes without floating IPs (only internal IPs on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` containing an SSH command that lets Ansible reach your machines by tunneling through the first floating IP used. If you want to handle the SSH tunneling to these machines manually, delete or move that file; if you want to use it, just leave it there and Ansible will pick it up automatically.
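
The generated SSH arguments amount to a ProxyCommand jump through that floating IP; doing the same by hand looks roughly like this (a sketch; the user and addresses are placeholders):

```
# Reach a host that only has a private IP by jumping through the
# first master's floating IP.
ssh -o ProxyCommand="ssh -W %h:%p -q ubuntu@FLOATING_IP" ubuntu@PRIVATE_IP
```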
Make sure you can connect to the hosts:
```
@@ -114,6 +131,8 @@ example-k8s-master-1 | SUCCESS => {
}
```
If you are deploying a system that needs bootstrapping, like CoreOS, these hosts might have a state of `FAILED` due to CoreOS not having Python. As long as the state is not `UNREACHABLE`, this is fine.
If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
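
For the stale host key case specifically, dropping the cached entry and retrying is usually enough (a sketch; the address is a placeholder):

```
ssh-keygen -R 203.0.113.10   # remove the old key for that host, then ssh again
```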
Deploy kubernetes:

View file

@@ -0,0 +1 @@
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'

View file

@@ -1,9 +1,14 @@
# Valid bootstrap options (required): xenial, coreos, none
bootstrap_os: "none"
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you have enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# Uncomment this line for CoreOS only.
# Directory where python binary is installed
@@ -28,6 +33,8 @@ kube_users:
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf
ndots: 5
# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
@@ -51,6 +58,16 @@ cluster_name: cluster.local
# but don't know about that address themselves.
# access_ip: 1.1.1.1
# Etcd access modes:
# Enable multiaccess to configure clients to access all of the etcd members directly
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false
# Assume no internal loadbalancers for apiservers exist, and listen on
# kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true
# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel
@@ -89,10 +106,12 @@ kube_apiserver_insecure_port: 8080 # (http)
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).
# Do not install additional dnsmasq
skip_dnsmasq: false
# Upstream dns servers used by dnsmasq
upstream_dns_servers:
- 8.8.8.8
- 8.8.4.4
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
@@ -109,21 +128,6 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
# like you would do when using nova-client before starting the playbook.
# cloud_provider:
# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer
# This domain name will be inserted into the /etc/hosts file of all servers
# configuration example with haproxy :
# listen kubernetes-apiserver-https
# bind 10.99.0.21:8383
# option ssl-hello-chk
# mode tcp
# timeout client 3h
# timeout server 3h
# server master1 10.99.0.26:443
# server master2 10.99.0.27:443
# balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
@@ -134,3 +138,7 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address')
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
# default packages to install within the cluster
kpm_packages: []
# - name: kube-system/grafana

View file

@@ -70,6 +70,28 @@ resource "openstack_compute_instance_v2" "k8s_master" {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
}
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
count = "${var.number_of_k8s_masters_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_master}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
"${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}
resource "openstack_compute_instance_v2" "k8s_node" {
@@ -89,6 +111,28 @@ resource "openstack_compute_instance_v2" "k8s_node" {
}
}
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
count = "${var.number_of_k8s_nodes_no_floating_ip}"
image_name = "${var.image}"
flavor_id = "${var.flavor_k8s_node}"
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
network {
name = "${var.network_name}"
}
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
metadata = {
ssh_user = "${var.ssh_user}"
kubespray_groups = "kube-node,k8s-cluster"
}
provisioner "local-exec" {
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/k8s-cluster.yml"
}
}
#output "msg" {
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
#}

View file

@@ -6,10 +6,18 @@ variable "number_of_k8s_masters" {
default = 2
}
variable "number_of_k8s_masters_no_floating_ip" {
default = 2
}
variable "number_of_k8s_nodes" {
default = 1
}
variable "number_of_k8s_nodes_no_floating_ip" {
default = 1
}
variable "public_key_path" {
description = "The path of the ssh pub key"
default = "~/.ssh/id_rsa.pub"

View file

@@ -16,4 +16,10 @@ dnsmasq_version: 2.72
# Images
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
# Skip dnsmasq setup
skip_dnsmasq: false
# Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"

View file

@@ -1,5 +1,5 @@
---
- include: dnsmasq.yml
when: "{{ not skip_dnsmasq|bool }}"
when: "{{ not skip_dnsmasq_k8s|bool }}"
- include: resolvconf.yml

View file

@@ -3,6 +3,7 @@
command: /bin/true
notify:
- Docker | reload systemd
- Docker | reload docker.socket
- Docker | reload docker
- Docker | pause while Docker restarts
- Docker | wait for docker
@@ -16,6 +17,12 @@
name: docker
state: restarted
- name: Docker | reload docker.socket
service:
name: docker.socket
state: restarted
when: ansible_os_family == 'CoreOS'
- name: Docker | pause while Docker restarts
pause: seconds=10 prompt="Waiting for docker restart"

View file

@@ -27,31 +27,30 @@
master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
- name: Gen_certs | Get the certs from first master
slurp:
src: "{{ kube_cert_dir }}/{{ item }}"
- name: Gen_certs | Gather master certs
shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
register: master_cert_data
delegate_to: "{{groups['kube-master'][0]}}"
register: slurp_certs
with_items: '{{ master_certs + node_certs }}'
when: sync_certs|default(false)
run_once: true
notify: set secret_changed
when: sync_certs|default(false)
- name: Gen_certs | Gather node certs
shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
register: node_cert_data
delegate_to: "{{groups['kube-master'][0]}}"
run_once: true
when: sync_certs|default(false)
- name: Gen_certs | Copy certs on masters
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_certs.results}}'
shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Copy certs on nodes
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_certs.results}}'
when: item.item in node_certs and
inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | check certificate permissions

View file

@@ -43,20 +43,15 @@
delegate_to: "{{groups['kube-master'][0]}}"
when: sync_tokens|default(false)
- name: Gen_tokens | Get the tokens from first master
slurp:
src: "{{ item }}"
register: slurp_tokens
with_items: '{{tokens_list.stdout_lines}}'
run_once: true
- name: Gen_tokens | Gather tokens
shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
register: tokens_data
delegate_to: "{{groups['kube-master'][0]}}"
run_once: true
when: sync_tokens|default(false)
notify: set secret_changed
- name: Gen_tokens | Copy tokens on masters
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_tokens.results}}'
shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
changed_when: false
when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
inventory_hostname != groups['kube-master'][0]

View file

@@ -1,6 +1,6 @@
---
- hosts: localhost
sudo: False
become: false
gather_facts: no
vars:
cloud_machine_type: g1-small

View file

@@ -1,6 +1,6 @@
---
- hosts: localhost
sudo: False
become: false
gather_facts: no
vars:
cloud_machine_type: f1-micro

View file

@@ -2,6 +2,16 @@ node1 ansible_ssh_host={{gce.instance_data[0].public_ip}}
node2 ansible_ssh_host={{gce.instance_data[1].public_ip}}
node3 ansible_ssh_host={{gce.instance_data[2].public_ip}}
{% if mode is defined and mode == "separate" %}
[kube-master]
node1
[kube-node]
node2
[etcd]
node3
{% else %}
[kube-master]
node1
node2
@@ -14,6 +24,7 @@ node3
[etcd]
node1
node2
{% endif %}
[k8s-cluster:children]
kube-node