Merge branch 'master' of github.com:kubespray/kargo

ant31 2016-11-07 12:05:36 +01:00
commit 31f9ef82e7
61 changed files with 508 additions and 226 deletions


@ -10,81 +10,95 @@ env:
TEST_ID=$TRAVIS_JOB_NUMBER
CONTAINER_ENGINE=docker
PRIVATE_KEY=$GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID=$GS_KEY
GS_SECRET_ACCESS_KEY=$GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES=1
CLUSTER_MODE=default
matrix:
# Debian Jessie
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
# Centos 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=centos-7-sudo
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
# Redhat 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=default
# Ubuntu 16.04
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-central1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-east1-d
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=asia-east1-c
CLUSTER_MODE=default
# Ubuntu 15.10
# Extra cases for separated roles
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=rhel-7-sudo
CLOUD_REGION=europe-west1-b
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=us-central1-a
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1510-wily
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=us-east1-d
CLUSTER_MODE=separate
before_install:
@ -92,7 +106,8 @@ before_install:
- pip install --user boto -U
- pip install --user ansible
- pip install --user netaddr
- pip install --user apache-libcloud
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
- pip install --user apache-libcloud==0.20.1
cache:
- directories:
@ -109,12 +124,11 @@ before_script:
- $HOME/.local/bin/ansible-playbook --version
- cp tests/ansible.cfg .
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
# - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/configure-logs.yaml
script:
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
@ -133,8 +147,15 @@ script:
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
## Ping between 2 pods
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
# - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml
after_failure:
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml >/dev/null
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/upload-logs-gcs.yml -i "localhost," -c local
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gs_key=${GS_ACCESS_KEY_ID}
-e gs_skey=${GS_SECRET_ACCESS_KEY}
after_script:
- >

OWNERS

@ -4,3 +4,6 @@
owners:
- Smana
- ant31
- bogdando
- mattymo
- rsmitty


@ -13,7 +13,7 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to
To deploy the cluster you can use:
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
the usual **Ansible** commands <br>
**vagrant** by simply running `vagrant up` (for testing purposes) <br>
@ -41,10 +41,10 @@ Supported Linux distributions
Versions
--------------
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.3.0 <br>
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.3 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.10.3 <br>

Vagrantfile

@ -16,7 +16,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
$box = "bento/ubuntu-14.04"
$box = "bento/ubuntu-16.04"
host_vars = {}
@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
end
end
if Vagrant.has_plugin?("vagrant-proxyconf")
$no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
(1..$num_instances).each do |i|
$no_proxy += ",#{$subnet}.#{i+100}"
end
end
Vagrant.configure("2") do |config|
# always use Vagrants insecure key
config.ssh.insert_key = false
@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
config.vm.hostname = vm_name
if Vagrant.has_plugin?("vagrant-proxyconf")
config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
config.proxy.no_proxy = $no_proxy
end
if $expose_docker_tcp
config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
end


@ -1,4 +1,7 @@
[ssh_connection]
pipelining=True
[defaults]
host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp


@ -10,21 +10,22 @@
- hosts: all
gather_facts: true
- hosts: etcd:!k8s-cluster
- hosts: all
roles:
- { role: kubernetes/preinstall, tags: preinstall }
- hosts: etcd:!k8s-cluster
roles:
- { role: etcd, tags: etcd }
- hosts: k8s-cluster
roles:
- { role: kubernetes/preinstall, tags: preinstall }
- { role: etcd, tags: etcd }
- { role: kubernetes/node, tags: node }
- { role: network_plugin, tags: network }
- hosts: kube-master
roles:
- { role: kubernetes/preinstall, tags: preinstall }
- { role: kubernetes/master, tags: master }
- hosts: k8s-cluster

Binary file not shown (new image added, 57 KiB).


@ -33,15 +33,29 @@ Kube-apiserver
--------------
K8s components require a loadbalancer to access the apiservers via a reverse
proxy. A kube-proxy does not support multiple apiservers for the time being so
proxy. Kargo includes support for an nginx-based proxy that resides on each
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
is less efficient than a dedicated load balancer because it creates extra
health checks on the Kubernetes apiserver, but is more practical for scenarios
where an external LB or virtual IP management is inconvenient.
This option is configured by the variable `loadbalancer_apiserver_localhost`.
you will need to configure your own loadbalancer to achieve HA. Note that
deploying a loadbalancer is up to a user and is not covered by ansible roles
in Kargo. By default, it only configures a non-HA endpoint, which points to
the `access_ip` or IP address of the first server node in the `kube-master`
group. It can also configure clients to use endpoints for a given loadbalancer
type.
type. The following diagram shows how traffic to the apiserver is directed.
A loadbalancer (LB) may be an external or internal one. An external LB
![Image](figures/loadbalancer_localhost.png?raw=true)
Note: Kubernetes master nodes still use insecure localhost access because
there are bugs in Kubernetes <1.5.0 with TLS auth on master role
services. This means the backends receive unencrypted traffic, which may be a
security issue when interconnecting different nodes, or may not be, if those
nodes belong to an isolated management network without external access.
A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only on localhost, similarly to the etcd-proxy HA endpoints.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
@ -69,47 +83,18 @@ loadbalancer_apiserver:
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
the HAProxy service should itself be HA and requires VIP management, which
is out of scope of this doc.
is out of scope of this doc. Specifying an external LB overrides any internal
localhost LB configuration.
The internal LB may be the case if you do not want to operate a VIP management
HA stack and require no external and no secure access to the K8s API. The group
var `loadbalancer_apiserver_localhost` (defaults to `false`) controls that
deployment layout. When enabled, it is expected each node in the `k8s-cluster`
group to run a loadbalancer that listens the localhost frontend and has all
of the apiservers as backends. Here is an example configuration for a HAProxy
service acting as an internal LB:
```
listen kubernetes-apiserver-http
bind localhost:8080
mode tcp
timeout client 3h
timeout server 3h
server master1 <IP1>:8080
server master2 <IP2>:8080
balance leastconn
```
And the corresponding example global vars config:
```
loadbalancer_apiserver_localhost: true
```
This var overrides an external LB configuration, if any. Note that for this
example, the `kubernetes-apiserver-http` endpoint has backends receiving
unencrypted traffic, which may be a security issue when interconnecting
different nodes, or may be not, if those belong to the isolated management
network without external access.
In order to achieve HA for HAProxy instances, those must be running on the
each node in the `k8s-cluster` group as well, but require no VIP, thus
no VIP management.
Note: In order to achieve HA for the HAProxy instances, they must also run on
each node in the `k8s-cluster` group, but they require no VIP, thus
no VIP management.
Access endpoints are evaluated automagically, as follows:
| Endpoint type | kube-master | non-master |
|------------------------------|---------------|---------------------|
| Local LB (overrides ext) | http://lc:p | http://lc:p |
| Local LB | http://lc:p | https://lc:sp |
| External LB, no internal | https://lb:lp | https://lb:lp |
| No ext/int LB (default) | http://lc:p | https://m[0].aip:sp |
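For reference, here is a minimal group vars sketch of the two layouts discussed above. The `loadbalancer_apiserver_localhost` and `apiserver_loadbalancer_domain_name` variables appear elsewhere in this doc; the `address`/`port` sub-keys of `loadbalancer_apiserver` are shown as illustrative placeholders.
```
# Internal localhost LB only: an nginx proxy is deployed on every non-master node
loadbalancer_apiserver_localhost: true

# External LB instead (overrides any internal localhost LB configuration);
# the values below are placeholders
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
loadbalancer_apiserver:
  address: <VIP>
  port: 443
```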


@ -1,6 +1,10 @@
Kargo's roadmap
=================
### Kubeadm
- Propose kubeadm as an option for setting up the Kubernetes cluster.
That would probably improve deployment speed and certificate management [#553](https://github.com/kubespray/kargo/issues/553)
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
@ -26,13 +30,14 @@ Kargo's roadmap
- single test with the Ansible version n-1 per day
- Test idempotency on a single OS but for all network plugins/container engines
- single test on AWS per day
- test different architectures:
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
- 5 instances, 3 are etcd and nodes, 2 are masters only
- 7 instances, 3 etcd only, 2 masters, 2 nodes
- test scale up cluster: +1 etcd, +1 master, +1 node
### Lifecycle
- Adopt the kubeadm tool by delegating to it the CM tasks it is capable of accomplishing well [#553](https://github.com/kubespray/kargo/issues/553)
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- Drain worker node when shutting down/deleting an instance
@ -56,7 +61,7 @@ While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kub
### Kargo API
- Perform all actions through an **API**
- Store inventories / configurations of multiple clusters
- make sure that the state of the cluster is completely saved in no more than one config file beyond the hosts inventory
### Addons (with kpm)
Include optional deployments to init the cluster:
@ -65,7 +70,7 @@ Include optionals deployments to init the cluster:
- **Prometheus**
##### Others
##### Dashboards:
- kubernetes-dashboard
- Fabric8


@ -64,8 +64,9 @@ ndots: 5
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false
# Assume there are no internal loadbalancers for apiservers exist
loadbalancer_apiserver_localhost: false
# Deploy a localhost loadbalancer for the apiservers on each non-master node,
# listening on kube_apiserver_port (default 443)
loadbalancer_apiserver_localhost: true
# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel
@ -108,9 +109,9 @@ kube_apiserver_insecure_port: 8080 # (http)
# Do not install additional dnsmasq
skip_dnsmasq: false
# Upstream dns servers used by dnsmasq
upstream_dns_servers:
- 8.8.8.8
- 8.8.4.4
#upstream_dns_servers:
# - 8.8.8.8
# - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true


@ -10,3 +10,16 @@
# A max of 2 is allowed here (one slot is reserved for the dns_server)
#nameservers:
# - 127.0.0.1
# Versions
dnsmasq_version: 2.72
# Images
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
# Skip dnsmasq setup
skip_dnsmasq: false
# Skip setting up dnsmasq daemonset
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
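A hedged sketch of how these defaults might be overridden (for example from inventory group vars); only variable names defined above are used:
```
# Pin the dnsmasq image explicitly (these values mirror the defaults above)
dnsmasq_version: 2.72
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"

# Or disable the dnsmasq setup entirely; skip_dnsmasq_k8s follows it by default
skip_dnsmasq: true
```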


@ -1,5 +1,5 @@
---
- include: dnsmasq.yml
when: "{{ not skip_dnsmasq|bool }}"
when: "{{ not skip_dnsmasq_k8s|bool }}"
- include: resolvconf.yml


@ -13,6 +13,8 @@ server=/{{ dns_domain }}/{{ skydns_server }}
{% for srv in upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% elif cloud_provider is defined and cloud_provider == "gce" %}
server=169.254.169.254
{% else %}
server=8.8.8.8
server=8.8.4.4


@ -14,7 +14,7 @@ spec:
spec:
containers:
- name: dnsmasq
image: andyshinn/dnsmasq:2.72
image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
command:
- dnsmasq
args:


@ -3,6 +3,7 @@
command: /bin/true
notify:
- Docker | reload systemd
- Docker | reload docker.socket
- Docker | reload docker
- Docker | pause while Docker restarts
- Docker | wait for docker
@ -16,6 +17,12 @@
name: docker
state: restarted
- name: Docker | reload docker.socket
service:
name: docker.socket
state: restarted
when: ansible_os_family == 'CoreOS'
- name: Docker | pause while Docker restarts
pause: seconds=10 prompt="Waiting for docker restart"


@ -5,16 +5,17 @@ local_release_dir: /tmp
download_run_once: False
# Versions
include_vars: kube_versions.yml
kube_version: v1.4.3
etcd_version: v3.0.6
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: v0.20.0
calico_cni_version: v1.3.1
calico_version: v0.22.0
calico_cni_version: v1.4.2
weave_version: v1.6.1
flannel_version: 0.5.5
flannel_version: v0.6.2
flannel_server_helper_version: 0.1
pod_infra_version: 3.0
# Download URL's
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
@ -23,8 +24,8 @@ calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni
weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
# Checksums
calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
@ -43,6 +44,8 @@ calico_node_image_repo: "calico/node"
calico_node_image_tag: "{{ calico_version }}"
hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
pod_infra_image_tag: "{{ pod_infra_version }}"
downloads:
calico_cni_plugin:
@ -108,6 +111,10 @@ downloads:
repo: "{{ calico_node_image_repo }}"
tag: "{{ calico_node_image_tag }}"
enabled: "{{ kube_network_plugin == 'calico' }}"
pod_infra:
container: true
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
download:
container: "{{ file.container|default('false') }}"


@ -1,6 +1,4 @@
---
- include_vars: kube_versions.yml
- name: downloading...
debug:
msg: "{{ download.url }}"
@ -63,11 +61,22 @@
- set_fact:
fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"
- name: "Set default value for 'container_changed' to false"
set_fact:
container_changed: false
- name: "Update the 'container_changed' fact"
set_fact:
container_changed: "{{ not 'up to date' in pull_task_result.stdout }}"
when: "{{ download.enabled|bool and download.container|bool }}"
delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
run_once: "{{ download_run_once|bool }}"
- name: Download | save container images
shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
delegate_to: "{{groups['kube-master'][0]}}"
run_once: true
when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool
when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
- name: Download | get container images
synchronize:
@ -78,8 +87,8 @@
until: get_task|success
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
- name: Download | load container images
shell: docker load < "{{ fname }}"
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool


@ -1 +0,0 @@
kube_version: v1.3.0


@ -1,6 +1,6 @@
---
- name: Configure | Check if member is in cluster
shell: "etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
register: etcd_member_in_cluster
ignore_errors: true
changed_when: false
@ -8,7 +8,7 @@
- name: Configure | Add member to the cluster if it is not there
when: is_etcd_master and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
shell: "etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
- name: Configure | Copy etcd.service systemd file
template:


@ -1,6 +1,6 @@
---
- name: Configure | Check if cluster is healthy
shell: "etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
register: etcd_cluster_is_healthy
ignore_errors: true
changed_when: false


@ -2,4 +2,4 @@ ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}


@ -13,4 +13,4 @@ ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}


@ -0,0 +1,12 @@
# Versions
kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1
# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"


@ -0,0 +1,10 @@
- name: Write calico-policy-controller yaml
template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
when: inventory_hostname == groups['kube-master'][0]
- name: Start Calico policy controller
kube:
kubectl: "{{bin_dir}}/kubectl"
filename: /etc/kubernetes/calico-policy-controller.yml
when: inventory_hostname == groups['kube-master'][0]


@ -17,3 +17,7 @@
state: "{{item.changed | ternary('latest','present') }}"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
- include: tasks/calico-policy-controller.yml
when: enable_network_policy is defined and enable_network_policy == True


@ -0,0 +1,40 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
matchLabels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
spec:
hostNetwork: true
containers:
- name: calico-policy-controller
image: calico/kube-policy-controller:latest
env:
- name: ETCD_ENDPOINTS
value: "{{ etcd_endpoint }}"
# Location of the Kubernetes API - this shouldn't need to be
# changed so long as it is used in conjunction with
# CONFIGURE_ETC_HOSTS="true".
- name: K8S_API
value: "https://kubernetes.default:443"
# Configure /etc/hosts within the container to resolve
# the kubernetes.default Service to the correct clusterIP
# using the environment provided by the kubelet.
# This removes the need for KubeDNS to resolve the Service.
- name: CONFIGURE_ETC_HOSTS
value: "true"


@ -21,7 +21,7 @@ spec:
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-amd64:1.7
image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -63,7 +63,7 @@ spec:
name: dns-tcp-local
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
args:
- --log-facility=-
- --cache-size=1000
@ -77,7 +77,7 @@ spec:
name: dns-tcp
protocol: TCP
- name: healthz
image: gcr.io/google_containers/exechealthz-amd64:1.1
image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
resources:
# keep request = limit to keep this container in guaranteed class
limits:


@ -10,3 +10,21 @@ kube_users_dir: "{{ kube_config_dir }}/users"
# An experimental dev/test only dynamic volumes provisioner,
# for PetSets. Works for kube>=v1.3 only.
kube_hostpath_dynamic_provisioner: "false"
# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This directory is where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
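To illustrate the `kube_manifest_dir` comment above: any standard static pod manifest dropped into that directory is run by the kubelet at startup. A minimal, hypothetical example (the file name and pod are illustrative only, not part of this repo):
```
# {{ kube_manifest_dir }}/echo-static.yml  (hypothetical example)
apiVersion: v1
kind: Pod
metadata:
  name: echo-static
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
    - name: echo
      image: busybox
      command: ["sh", "-c", "echo static pod started; sleep 3600"]
```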


@ -16,7 +16,7 @@ spec:
- --etcd-quorum-read=true
- --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
- --apiserver-count={{ kube_apiserver_count }}
- --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
- --service-cluster-ip-range={{ kube_service_addresses }}
- --client-ca-file={{ kube_cert_dir }}/ca.pem
- --basic-auth-file={{ kube_users_dir }}/known_users.csv
@ -30,6 +30,9 @@ spec:
{% for conf in kube_api_runtime_config %}
- --runtime-config={{ conf }}
{% endfor %}
{% endif %}
{% if enable_network_policy is defined and enable_network_policy == True %}
- --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
- --v={{ kube_log_level | default('2') }}
- --allow-privileged=true


@ -1,6 +1,13 @@
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
kube_apiserver_insecure_bind_address: 127.0.0.1
# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
dns_domain: "{{ cluster_name }}"
# resolv.conf to base dns config
@ -14,3 +21,17 @@ kube_proxy_masquerade_all: true
# kube_api_runtime_config:
# - extensions/v1beta1/daemonsets=true
# - extensions/v1beta1/deployments=true
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# This directory is where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
nginx_image_repo: nginx
nginx_image_tag: 1.11.4-alpine


@ -2,4 +2,6 @@
dependencies:
- role: download
file: "{{ downloads.hyperkube }}"
- role: download
file: "{{ downloads.pod_infra }}"
- role: kubernetes/secrets


@ -1,6 +1,9 @@
---
- include: install.yml
- include: nginx-proxy.yml
when: is_kube_master == false and loadbalancer_apiserver_localhost|default(false)
- name: Write Calico cni config
template:
src: "cni-calico.conf.j2"


@ -0,0 +1,9 @@
---
- name: nginx-proxy | Write static pod
template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml
- name: nginx-proxy | Make nginx directory
file: path=/etc/nginx state=directory mode=0700 owner=root
- name: nginx-proxy | Write nginx-proxy configuration
template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes


@ -1,9 +1,16 @@
{
"name": "calico-k8s-network",
"type": "calico",
"etcd_authority": "{{ etcd_authority }}",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
{% if enable_network_policy is defined and enable_network_policy == True %}
"policy": {
"type": "k8s"
},
{% endif %}
"kubernetes": {
"kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
}
}


@ -20,11 +20,11 @@ KUBELET_REGISTER_NODE="--register-node=false"
{% endif %}
# location of the api-server
{% if dns_setup|bool and skip_dnsmasq|bool %}
KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% elif dns_setup|bool %}
KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% else %}
KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d"


@ -17,6 +17,7 @@ spec:
- --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
{% endif %}
- --bind-address={{ ip | default(ansible_default_ipv4.address) }}
- --cluster-cidr={{ kube_pods_subnet }}
- --proxy-mode={{ kube_proxy_mode }}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
- --masquerade-all


@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx-proxy
namespace: kube-system
spec:
hostNetwork: true
containers:
- name: nginx-proxy
image: {{ nginx_image_repo }}:{{ nginx_image_tag }}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/nginx
name: etc-nginx
readOnly: true
volumes:
- name: etc-nginx
hostPath:
path: /etc/nginx


@ -0,0 +1,26 @@
error_log stderr notice;
worker_processes auto;
events {
multi_accept on;
use epoll;
worker_connections 1024;
}
stream {
upstream kube_apiserver {
least_conn;
{% for host in groups['kube-master'] -%}
server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:{{ kube_apiserver_port }};
{% endfor %}
}
server {
listen {{ kube_apiserver_port }};
proxy_pass kube_apiserver;
proxy_timeout 3s;
proxy_connect_timeout 1s;
}
}


@ -4,6 +4,7 @@ clusters:
- name: local
cluster:
certificate-authority: {{ kube_cert_dir }}/ca.pem
server: {{ kube_apiserver_endpoint }}
users:
- name: kubelet
user:


@ -21,6 +21,7 @@ kube_log_dir: "/var/log/kubernetes"
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
common_required_pkgs:
- python-httplib2


@ -91,7 +91,7 @@
changed_when: False
- name: Install epel-release on RedHat/CentOS
shell: rpm -qa | grep epel-release || rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
when: ansible_distribution in ["CentOS","RedHat"] and
ansible_distribution_major_version >= 7
changed_when: False


@ -5,12 +5,12 @@
- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
- set_fact:
kube_apiserver_insecure_bind_address: |-
{% if loadbalancer_apiserver_localhost %}{{ kube_apiserver_address }}{% else %}127.0.0.1{% endif %}
loadbalancer_apiserver_localhost: false
when: loadbalancer_apiserver is defined
- set_fact:
kube_apiserver_endpoint: |-
{% if loadbalancer_apiserver_localhost -%}
http://127.0.0.1:{{ kube_apiserver_insecure_port }}
{% if not is_kube_master and loadbalancer_apiserver_localhost -%}
https://localhost:{{ kube_apiserver_port }}
{%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
http://127.0.0.1:{{ kube_apiserver_insecure_port }}
{%- else -%}
@ -30,7 +30,7 @@
- set_fact:
etcd_access_addresses: |-
{% for item in groups['etcd'] -%}
http://{{ hostvars[item].etcd_access_address }}:2379{% if not loop.last %},{% endif %}
http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2379{% if not loop.last %},{% endif %}
{%- endfor %}
- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact:
@ -38,6 +38,11 @@
{% for host in groups['etcd'] %}
{% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
{% endfor %}
- set_fact:
etcd_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ "etcd"+loop.index|string }}=http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
{%- endfor %}
- set_fact:
etcd_proxy_member_name: |-
{% for host in groups['k8s-cluster'] %}


@ -6,3 +6,16 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This directory is where all the additional config stuff goes
# that kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
# This directory is where all the additional scripts go
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"


@ -26,8 +26,8 @@ Usage : $(basename $0) -f <config> [-d <ssldir>]
-h | --help : Show this message
-f | --config : Openssl configuration file
-d | --ssldir : Directory where the certificates will be installed
ex :
$(basename $0) -f openssl.conf -d /srv/ssl
EOF
}
@ -37,7 +37,7 @@ while (($#)); do
case "$1" in
-h | --help) usage; exit 0;;
-f | --config) CONFIG=${2}; shift 2;;
-d | --ssldir) SSLDIR="${2}"; shift 2;;
*)
usage
echo "ERROR : Unknown option"
@ -68,6 +68,7 @@ openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
cat ca.pem >> apiserver.pem
# Nodes and Admin
for i in node admin; do


@ -27,7 +27,7 @@
sync_tokens: true
when: >-
{%- set tokens = {'sync': False} -%}
{%- for server in groups['kube-master']
{%- for server in groups['kube-master'] | intersect(play_hosts)
if (not hostvars[server].known_tokens.stat.exists) or
(hostvars[server].known_tokens.stat.checksum != known_tokens_master.stat.checksum|default('')) -%}
{%- set _ = tokens.update({'sync': True}) -%}


@ -27,31 +27,30 @@
master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
- name: Gen_certs | Get the certs from first master
slurp:
src: "{{ kube_cert_dir }}/{{ item }}"
- name: Gen_certs | Gather master certs
shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
register: master_cert_data
delegate_to: "{{groups['kube-master'][0]}}"
register: slurp_certs
with_items: '{{ master_certs + node_certs }}'
when: sync_certs|default(false)
run_once: true
notify: set secret_changed
when: sync_certs|default(false)
- name: Gen_certs | Gather node certs
shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
register: node_cert_data
delegate_to: "{{groups['kube-master'][0]}}"
run_once: true
when: sync_certs|default(false)
- name: Gen_certs | Copy certs on masters
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_certs.results}}'
shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Copy certs on nodes
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_certs.results}}'
when: item.item in node_certs and
inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | check certificate permissions
@ -65,3 +64,30 @@
shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
when: inventory_hostname in groups['kube-master']
changed_when: false
- name: Gen_certs | target ca-certificates directory
set_fact:
ca_cert_dir: |-
{% if ansible_os_family == "Debian" -%}
/usr/local/share/ca-certificates
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors
{%- elif ansible_os_family == "CoreOS" -%}
/etc/ssl/certs
{%- endif %}
- name: Gen_certs | add CA to trusted CA dir
copy:
src: "{{ kube_cert_dir }}/ca.pem"
dest: "{{ ca_cert_dir }}/kube-ca.crt"
remote_src: true
register: kube_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/CoreOS)
command: update-ca-certificates
when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
when: kube_ca_cert.changed and ansible_os_family == "RedHat"


@ -43,20 +43,15 @@
delegate_to: "{{groups['kube-master'][0]}}"
when: sync_tokens|default(false)
- name: Gen_tokens | Get the tokens from first master
slurp:
src: "{{ item }}"
register: slurp_tokens
with_items: '{{tokens_list.stdout_lines}}'
run_once: true
- name: Gen_tokens | Gather tokens
shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
register: tokens_data
delegate_to: "{{groups['kube-master'][0]}}"
run_once: true
when: sync_tokens|default(false)
notify: set secret_changed
- name: Gen_tokens | Copy tokens on masters
copy:
content: "{{ item.content|b64decode }}"
dest: "{{ item.source }}"
with_items: '{{slurp_tokens.results}}'
shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
changed_when: false
when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
inventory_hostname != groups['kube-master'][0]


@ -11,12 +11,18 @@ DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.{{ dns_domain }}
DNS.5 = localhost
{% for host in groups['kube-master'] %}
DNS.{{ 5 + loop.index }} = {{ host }}
{% endfor %}
{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
DNS.5 = {{ apiserver_loadbalancer_domain_name }}
{% set idx = groups['kube-master'] | length | int + 5 %}
DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
{% endif %}
{% for host in groups['kube-master'] %}
IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
{% endfor %}
{% set idx = groups['kube-master'] | length | int * 2 + 1 %}
IP.{{ idx | string }} = {{ kube_apiserver_ip }}
IP.{{ idx }} = {{ kube_apiserver_ip }}
IP.{{ idx + 1 }} = 127.0.0.1


@ -7,4 +7,4 @@ ipip: false
# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.
overwrite_hyperkube_cni: false
overwrite_hyperkube_cni: true


@ -22,16 +22,6 @@
changed_when: false
notify: restart calico-node
- name: Calico | Do not use hyperkube cni if kube_version under v1.3.4
set_fact:
use_hyperkube_cni: false
when: kube_version | version_compare('v1.3.4','<')
- name: Calico | Use hyperkube cni if kube_version above v1.3.4
set_fact:
use_hyperkube_cni: true
when: kube_version | version_compare('v1.3.4','>=')
- name: Calico | Copy cni plugins from hyperkube
command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/"
register: cni_task_result
@ -39,17 +29,16 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when: "{{ use_hyperkube_cni|bool }}"
- name: Calico | Install calico cni bin
command: rsync -pi "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico"
changed_when: false
when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
when: "{{ overwrite_hyperkube_cni|bool }}"
- name: Calico | Install calico-ipam cni bin
command: rsync -pi "{{ local_release_dir }}/calico/bin/calico-ipam" "/opt/cni/bin/calico-ipam"
changed_when: false
when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
when: "{{ overwrite_hyperkube_cni|bool }}"
- name: Calico | wait for etcd
uri: url=http://localhost:2379/health
@ -90,7 +79,7 @@
environment:
NO_DEFAULT_POOLS: true
run_once: true
when: calico_conf.status == 404
when: calico_conf.status == 404 or "nodes" not in calico_conf.content
- name: Calico | Get calico configuration from etcd
uri:


@ -9,17 +9,6 @@
notify:
- restart docker
- name: Weave | Determine hyperkube cni to use depending of the version of kube
set_fact:
use_hyperkube_cni: >
{%- if kube_version | version_compare('v1.3.4','>=') -%}
true
{%- elif kube_version | version_compare('v1.3.4','<') -%}
false
{%- else -%}
{{ ErrorCannotRecognizeVersion }}
{%- endif -%}
- name: Weave | Copy cni plugins from hyperkube
command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
register: cni_task_result
@ -27,7 +16,6 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when: "{{ use_hyperkube_cni|bool }}"
- name: Weave | Install weave
command: rsync -piu "{{ local_release_dir }}/weave/bin/weave" "{{ bin_dir }}/weave"


@ -2,11 +2,11 @@
local_release_dir: /tmp
# Versions
include_vars: kube_versions.yml
kube_version: v1.4.3
etcd_version: v3.0.6
calico_version: v0.20.0
calico_cni_version: v1.3.1
calico_version: v0.22.0
calico_cni_version: v1.4.2
weave_version: v1.6.1
# Download URL's
@ -16,8 +16,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea
weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave"
# Checksums
calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"


@ -1,6 +1,4 @@
---
- include_vars: "kube_versions.yml"
- name: Create dest directories
file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
with_items: '{{downloads}}'


@ -1 +0,0 @@
kube_version: v1.3.0


@ -6,16 +6,10 @@
vars:
debug: false
commands:
- name: git_info
cmd: find . -type d -name .git -execdir sh -c 'gen-gitinfos.sh global|head -12' \;
- name: timedate_info
cmd: timedatectl status
- name: space_info
cmd: df -h
- name: kernel_info
cmd: uname -r
- name: distro_info
cmd: cat /etc/issue.net
- name: docker_info
cmd: docker info
- name: ip_info
@ -24,23 +18,26 @@
cmd: ip ro
- name: proc_info
cmd: ps auxf | grep -v ]$
- name: systemctl_info
cmd: systemctl status
- name: systemctl_failed_info
cmd: systemctl --state=failed --no-pager
- name: k8s_info
cmd: kubectl get all --all-namespaces -o wide
- name: errors_info
cmd: journalctl -p err --utc --no-pager
- name: etcd_info
cmd: etcdctl --debug cluster-health
logs:
- /var/log/ansible.log
- /var/log/ansible/ansible.log
- /var/log/syslog
- /var/log/daemon.log
- /var/log/kern.log
- inventory/inventory.ini
- cluster.yml
- /var/log/dpkg.log
- /var/log/apt/history.log
- /var/log/yum.log
- /var/log/calico/bird/current
- /var/log/calico/bird6/current
- /var/log/calico/felix/current
- /var/log/calico/confd/current
tasks:
- name: Storing commands output
@ -50,7 +47,7 @@
with_items: "{{commands}}"
- debug: var=item
with_items: output.results
with_items: "{{output.results}}"
when: debug
- name: Fetch results


@ -1,39 +0,0 @@
---
- hosts: localhost
become: true
gather_facts: no
vars:
log_path: /var/log/ansible/
conf_file: /etc/ansible/ansible.cfg
human_readable_plugin: false
callback_plugin_path: /usr/share/ansible/plugins/callback
tasks:
- name: LOGS | ensure log path
file: path="{{log_path}}" state=directory owner={{ansible_ssh_user}}
- name: LOGS | ensure plugin path
file: path="{{callback_plugin_path}}" state=directory owner={{ansible_ssh_user}}
when: human_readable_plugin
- name: LOGS | get plugin
git: repo=https://gist.github.com/cd706de198c85a8255f6.git dest=/tmp/cd706de198c85a8255f6
when: human_readable_plugin
- name: LOGS | install plugin
copy: src=/tmp/cd706de198c85a8255f6/human_log.py dest="{{callback_plugin_path}}"
when: human_readable_plugin
- name: LOGS | config
lineinfile:
line: "log_path={{log_path}}/ansible.log"
regexp: "^#log_path|^log_path"
dest: "{{conf_file}}"
- name: LOGS | callback plugin
lineinfile:
line: "callback_plugins={{callback_plugin_path}}"
regexp: "^#callback_plugins|^callback_plugins"
dest: "{{conf_file}}"
when: human_readable_plugin


@ -1,4 +1,7 @@
[ssh_connection]
pipelining=True
[defaults]
host_key_checking=False
gathering = smart
fact_caching = jsonfile
fact_caching_connection = /tmp


@ -1,6 +1,6 @@
---
- hosts: localhost
sudo: False
become: false
gather_facts: no
vars:
cloud_machine_type: g1-small


@ -1,6 +1,6 @@
---
- hosts: localhost
sudo: False
become: false
gather_facts: no
vars:
cloud_machine_type: f1-micro


@ -0,0 +1,43 @@
---
- hosts: localhost
become: false
gather_facts: no
vars:
expire: 72000
tasks:
- name: replace_test_id
set_fact:
test_name: "{{ test_id | regex_replace('\\.', '-') }}"
- name: Create a bucket
gc_storage:
bucket: "{{ test_name }}"
mode: create
expiration: "{{ expire }}"
permission: private
gs_access_key: "{{ gs_key }}"
gs_secret_key: "{{ gs_skey }}"
- name: Upload collected diagnostic info
gc_storage:
bucket: "{{ test_name }}"
mode: put
permission: private
expiration: "{{ expire }}"
object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
src: logs.tar.gz
gs_access_key: "{{ gs_key }}"
gs_secret_key: "{{ gs_skey }}"
- name: Get a link
gc_storage:
bucket: "{{ test_name }}"
object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
mode: get_url
register: url
gs_access_key: "{{ gs_key }}"
gs_secret_key: "{{ gs_skey }}"
- debug: msg="Download URL {{ url }}"


@ -2,6 +2,16 @@ node1 ansible_ssh_host={{gce.instance_data[0].public_ip}}
node2 ansible_ssh_host={{gce.instance_data[1].public_ip}}
node3 ansible_ssh_host={{gce.instance_data[2].public_ip}}
{% if mode is defined and mode == "separate" %}
[kube-master]
node1
[kube-node]
node2
[etcd]
node3
{% else %}
[kube-master]
node1
node2
@ -14,6 +24,7 @@ node3
[etcd]
node1
node2
{% endif %}
[k8s-cluster:children]
kube-node