Merge branch 'master' of github.com:kubespray/kargo

ant31 2016-11-07 12:05:36 +01:00
commit 31f9ef82e7
61 changed files with 508 additions and 226 deletions

.travis.yml

@@ -10,81 +10,95 @@ env:
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
+   GS_ACCESS_KEY_ID=$GS_KEY
+   GS_SECRET_ACCESS_KEY=$GS_SECRET
    ANSIBLE_KEEP_REMOTE_FILES=1
-   CLUSTER_MODE=default
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-b
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-central1-c
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d
+     CLUSTER_MODE=default
    # Centos 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=asia-east1-c
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=europe-west1-b
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=us-central1-c
+     CLUSTER_MODE=default
    # Redhat 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=us-east1-d
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=asia-east1-c
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
+     CLUSTER_MODE=default
    # Ubuntu 16.04
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-c
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-east1-d
+     CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=asia-east1-c
+     CLUSTER_MODE=default
-   # Ubuntu 15.10
+   # Extra cases for separated roles
    - >-
      KUBE_NETWORK_PLUGIN=flannel
-     CLOUD_IMAGE=ubuntu-1510-wily
+     CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
+     CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=calico
-     CLOUD_IMAGE=ubuntu-1510-wily
+     CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-a
+     CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=weave
-     CLOUD_IMAGE=ubuntu-1510-wily
+     CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d
+     CLUSTER_MODE=separate
before_install:
@@ -92,7 +106,8 @@ before_install:
  - pip install --user boto -U
  - pip install --user ansible
  - pip install --user netaddr
-  - pip install --user apache-libcloud
+  # W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
+  - pip install --user apache-libcloud==0.20.1
cache:
  - directories:
@@ -109,12 +124,11 @@ before_script:
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
-## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
-# - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml
script:
  - >
-    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
+    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
+    -e mode=${CLUSTER_MODE}
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
@@ -133,8 +147,15 @@ script:
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
  ## Ping the between 2 pod
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
-## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
-# - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml
+after_failure:
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml >/dev/null
+  - >
+    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/upload-logs-gcs.yml -i "localhost," -c local
+    -e test_id=${TEST_ID}
+    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+    -e gs_key=${GS_ACCESS_KEY_ID}
+    -e gs_skey=${GS_SECRET_ACCESS_KEY}
after_script:
  - >

OWNERS

@@ -4,3 +4,6 @@
owners:
  - Smana
  - ant31
+  - bogdando
+  - mattymo
+  - rsmitty

README.md

@@ -13,7 +13,7 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to

To deploy the cluster you can use :

-[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
+[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
**Ansible** usual commands <br>
**vagrant** by simply running `vagrant up` (for tests purposes) <br>

@@ -41,10 +41,10 @@ Supported Linux distributions

Versions
--------------

-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.3.0 <br>
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.3 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
+[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.22.0 <br>
[weave](http://weave.works/) v1.6.1 <br>
[docker](https://www.docker.com/) v1.10.3 <br>

Vagrantfile

@@ -16,7 +16,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
-$box = "bento/ubuntu-14.04"
+$box = "bento/ubuntu-16.04"

host_vars = {}

@@ -38,6 +38,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
  end
end

+if Vagrant.has_plugin?("vagrant-proxyconf")
+  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+  (1..$num_instances).each do |i|
+    $no_proxy += ",#{$subnet}.#{i+100}"
+  end
+end
+
Vagrant.configure("2") do |config|
  # always use Vagrants insecure key
  config.ssh.insert_key = false

@@ -52,6 +59,12 @@ Vagrant.configure("2") do |config|
    config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
      config.vm.hostname = vm_name

+      if Vagrant.has_plugin?("vagrant-proxyconf")
+        config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+        config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
+        config.proxy.no_proxy = $no_proxy
+      end
+
      if $expose_docker_tcp
        config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
      end

ansible.cfg

@@ -2,3 +2,6 @@
pipelining=True
[defaults]
host_key_checking=False
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp

cluster.yml

@@ -10,21 +10,22 @@
- hosts: all
  gather_facts: true

-- hosts: etcd:!k8s-cluster
+- hosts: all
  roles:
    - { role: kubernetes/preinstall, tags: preinstall }
+
+- hosts: etcd:!k8s-cluster
+  roles:
    - { role: etcd, tags: etcd }

- hosts: k8s-cluster
  roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: etcd, tags: etcd }
    - { role: kubernetes/node, tags: node }
    - { role: network_plugin, tags: network }

- hosts: kube-master
  roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
    - { role: kubernetes/master, tags: master }

- hosts: k8s-cluster
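Structurally, this moves `kubernetes/preinstall` into a single `hosts: all` play, so each host runs it exactly once; in the old layout it was listed in three group plays, and hosts belonging to more than one group (masters, for instance, are also in `k8s-cluster`) ran it repeatedly.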

docs/figures/loadbalancer_localhost.png: new binary image (57 KiB), not shown

docs/ha-mode.md

@@ -33,15 +33,29 @@ Kube-apiserver
--------------

K8s components require a loadbalancer to access the apiservers via a reverse
-proxy. A kube-proxy does not support multiple apiservers for the time being so
+proxy. Kargo includes support for an nginx-based proxy that resides on each
+non-master Kubernetes node. This is referred to as localhost loadbalancing. It
+is less efficient than a dedicated load balancer because it creates extra
+health checks on the Kubernetes apiserver, but is more practical for scenarios
+where an external LB or virtual IP management is inconvenient.
+This option is configured by the variable `loadbalancer_apiserver_localhost`.
+
you will need to configure your own loadbalancer to achieve HA. Note that
deploying a loadbalancer is up to a user and is not covered by ansible roles
in Kargo. By default, it only configures a non-HA endpoint, which points to
the `access_ip` or IP address of the first server node in the `kube-master`
group. It can also configure clients to use endpoints for a given loadbalancer
-type.
+type. The following diagram shows how traffic to the apiserver is directed.

-A loadbalancer (LB) may be an external or internal one. An external LB
+![Image](figures/loadbalancer_localhost.png?raw=true)
+
+Note: Kubernetes master nodes still use insecure localhost access because
+there are bugs in Kubernetes <1.5.0 in using TLS auth on master role
+services. This makes backends receiving unencrypted traffic and may be a
+security issue when interconnecting different nodes, or maybe not, if those
+belong to the isolated management network without external access.
+
+A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only to the localhost, similarly to the etcd-proxy HA endpoints.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
@@ -69,47 +83,18 @@ loadbalancer_apiserver:

This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
the HAProxy service should as well be HA and requires a VIP management, which
-is out of scope of this doc.
+is out of scope of this doc. Specifying an external LB overrides any internal
+localhost LB configuration.

-The internal LB may be the case if you do not want to operate a VIP management
-HA stack and require no external and no secure access to the K8s API. The group
-var `loadbalancer_apiserver_localhost` (defaults to `false`) controls that
-deployment layout. When enabled, it is expected each node in the `k8s-cluster`
-group to run a loadbalancer that listens the localhost frontend and has all
-of the apiservers as backends. Here is an example configuration for a HAProxy
-service acting as an internal LB:
-
-```
-listen kubernetes-apiserver-http
-  bind localhost:8080
-  mode tcp
-  timeout client 3h
-  timeout server 3h
-  server master1 <IP1>:8080
-  server master2 <IP2>:8080
-  balance leastconn
-```
-
-And the corresponding example global vars config:
-
-```
-loadbalancer_apiserver_localhost: true
-```
-
-This var overrides an external LB configuration, if any. Note that for this
-example, the `kubernetes-apiserver-http` endpoint has backends receiving
-unencrypted traffic, which may be a security issue when interconnecting
-different nodes, or may be not, if those belong to the isolated management
-network without external access.
-
-In order to achieve HA for HAProxy instances, those must be running on the
-each node in the `k8s-cluster` group as well, but require no VIP, thus
-no VIP management.
+Note: In order to achieve HA for HAProxy instances, those must be running on
+the each node in the `k8s-cluster` group as well, but require no VIP, thus
+no VIP management.

Access endpoints are evaluated automagically, as the following:

| Endpoint type                | kube-master   | non-master          |
|------------------------------|---------------|---------------------|
-| Local LB (overrides ext)     | http://lc:p   | http://lc:p         |
+| Local LB                     | http://lc:p   | https://lc:sp       |
| External LB, no internal     | https://lb:lp | https://lb:lp       |
| No ext/int LB (default)      | http://lc:p   | https://m[0].aip:sp |
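Taken together, the two layouts above reduce to a small group-vars choice. A minimal sketch, assuming the variable names shown in this doc; the VIP address is a placeholder, not a real default:

```
# Internal LB: an nginx static pod on each non-master node proxies
# https://localhost:{{ kube_apiserver_port }} to all apiservers (now the default).
loadbalancer_apiserver_localhost: true

# External LB instead; specifying it overrides the localhost LB:
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
# loadbalancer_apiserver:
#   address: 10.1.2.3   # placeholder VIP
#   port: 443
```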

docs/roadmap.md

@@ -1,6 +1,10 @@
Kargo's roadmap
=================

+### Kubeadm
+- Propose kubeadm as an option in order to setup the kubernetes cluster.
+That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
+
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
- the playbook would install and configure docker/rkt and the etcd cluster
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.

@@ -33,6 +37,7 @@ Kargo's roadmap
- test scale up cluster: +1 etcd, +1 master, +1 node

### Lifecycle
+- Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
- Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
- Drain worker node when shutting down/deleting an instance


@@ -64,8 +64,9 @@ ndots: 5
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false

-# Assume there are no internal loadbalancers for apiservers exist
-loadbalancer_apiserver_localhost: false
+# Assume there are no internal loadbalancers for apiservers exist and listen on
+# kube_apiserver_port (default 443)
+loadbalancer_apiserver_localhost: true

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

@@ -108,9 +109,9 @@ kube_apiserver_insecure_port: 8080 # (http)
# Do not install additional dnsmasq
skip_dnsmasq: false
# Upstream dns servers used by dnsmasq
-upstream_dns_servers:
-  - 8.8.8.8
-  - 8.8.4.4
+#upstream_dns_servers:
+#  - 8.8.8.8
+#  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true


@@ -10,3 +10,16 @@
# Max of 2 is allowed here (a 1 is reserved for the dns_server)
#nameservers:
#  - 127.0.0.1
+
+# Versions
+dnsmasq_version: 2.72
+
+# Images
+dnsmasq_image_repo: "andyshinn/dnsmasq"
+dnsmasq_image_tag: "{{ dnsmasq_version }}"
+
+# Skip dnsmasq setup
+skip_dnsmasq: false
+
+# Skip setting up dnsmasq daemonset
+skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"
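Because the image coordinates are now variables, an inventory can override them without patching the role. A minimal group-vars sketch, assuming a hypothetical internal registry:

```
dnsmasq_image_repo: "registry.internal.example/dnsmasq"   # hypothetical mirror
dnsmasq_image_tag: "2.72"
# skip only the dnsmasq DaemonSet while leaving skip_dnsmasq-driven
# kubelet DNS settings untouched:
skip_dnsmasq_k8s: true
```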


@@ -1,5 +1,5 @@
---
- include: dnsmasq.yml
-  when: "{{ not skip_dnsmasq|bool }}"
+  when: "{{ not skip_dnsmasq_k8s|bool }}"

- include: resolvconf.yml


@@ -13,6 +13,8 @@ server=/{{ dns_domain }}/{{ skydns_server }}
{% for srv in upstream_dns_servers %}
server={{ srv }}
{% endfor %}
+{% elif cloud_provider is defined and cloud_provider == "gce" %}
+server=169.254.169.254
{% else %}
server=8.8.8.8
server=8.8.4.4


@@ -14,7 +14,7 @@ spec:
    spec:
      containers:
      - name: dnsmasq
-        image: andyshinn/dnsmasq:2.72
+        image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
        command:
        - dnsmasq
        args:


@@ -3,6 +3,7 @@
  command: /bin/true
  notify:
    - Docker | reload systemd
+    - Docker | reload docker.socket
    - Docker | reload docker
    - Docker | pause while Docker restarts
    - Docker | wait for docker

@@ -16,6 +17,12 @@
    name: docker
    state: restarted

+- name: Docker | reload docker.socket
+  service:
+    name: docker.socket
+    state: restarted
+  when: ansible_os_family == 'CoreOS'
+
- name: Docker | pause while Docker restarts
  pause: seconds=10 prompt="Waiting for docker restart"


@@ -5,16 +5,17 @@ local_release_dir: /tmp
download_run_once: False

# Versions
-include_vars: kube_versions.yml
+kube_version: v1.4.3
etcd_version: v3.0.6
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
-calico_version: v0.20.0
-calico_cni_version: v1.3.1
+calico_version: v0.22.0
+calico_cni_version: v1.4.2
weave_version: v1.6.1
-flannel_version: 0.5.5
+flannel_version: v0.6.2
flannel_server_helper_version: 0.1
+pod_infra_version: 3.0

# Download URL's
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"

@@ -23,8 +24,8 @@ calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni
weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"

# Checksums
-calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
-calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"

@@ -43,6 +44,8 @@ calico_node_image_repo: "calico/node"
calico_node_image_tag: "{{ calico_version }}"
hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
+pod_infra_image_tag: "{{ pod_infra_version }}"

downloads:
  calico_cni_plugin:

@@ -108,6 +111,10 @@ downloads:
    repo: "{{ calico_node_image_repo }}"
    tag: "{{ calico_node_image_tag }}"
    enabled: "{{ kube_network_plugin == 'calico' }}"
+  pod_infra:
+    container: true
+    repo: "{{ pod_infra_image_repo }}"
+    tag: "{{ pod_infra_image_tag }}"

download:
  container: "{{ file.container|default('false') }}"
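The new `pod_infra_*` variables follow the same repo/tag split as the other images, so the pause container can be mirrored the same way. A sketch, again with a hypothetical registry:

```
pod_infra_image_repo: "registry.internal.example/pause-amd64"   # hypothetical mirror
pod_infra_version: 3.0
```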


@@ -1,6 +1,4 @@
---
-- include_vars: kube_versions.yml
-
- name: downloading...
  debug:
    msg: "{{ download.url }}"

@@ -63,11 +61,22 @@
- set_fact:
    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|regex_replace('/|\0|:', '_')}}.tar"

+- name: "Set default value for 'container_changed' to false"
+  set_fact:
+    container_changed: false
+
+- name: "Update the 'container_changed' fact"
+  set_fact:
+    container_changed: "{{ not 'up to date' in pull_task_result.stdout }}"
+  when: "{{ download.enabled|bool and download.container|bool }}"
+  delegate_to: "{{ groups['kube-master'][0] if download_run_once|bool else inventory_hostname }}"
+  run_once: "{{ download_run_once|bool }}"
+
- name: Download | save container images
  shell: docker save "{{ download.repo }}:{{ download.tag }}" > "{{ fname }}"
  delegate_to: "{{groups['kube-master'][0]}}"
  run_once: true
-  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool

- name: Download | get container images
  synchronize:

@@ -78,8 +87,8 @@
  until: get_task|success
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
-  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool

- name: Download | load container images
  shell: docker load < "{{ fname }}"
-  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool
+  when: ansible_os_family != "CoreOS" and inventory_hostname != groups['kube-master'][0] and download_run_once|bool and download.enabled|bool and download.container|bool and container_changed|bool
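The `container_changed` fact keys off the output of the earlier `docker pull` task (registered as `pull_task_result`): only when the pull did not report the image as up to date does the save/synchronize/load chain above run, which avoids re-shipping unchanged image tarballs to every node.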


@@ -1 +0,0 @@
-kube_version: v1.3.0


@@ -1,6 +1,6 @@
---
- name: Configure | Check if member is in cluster
-  shell: "etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  shell: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
  register: etcd_member_in_cluster
  ignore_errors: true
  changed_when: false

@@ -8,7 +8,7 @@
- name: Configure | Add member to the cluster if it is not there
  when: is_etcd_master and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
-  shell: "etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"

- name: Configure | Copy etcd.service systemd file
  template:


@@ -1,6 +1,6 @@
---
- name: Configure | Check if cluster is healthy
-  shell: "etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
+  shell: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'"
  register: etcd_cluster_is_healthy
  ignore_errors: true
  changed_when: false


@@ -2,4 +2,4 @@ ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
-ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}


@@ -13,4 +13,4 @@ ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
-ETCD_INITIAL_CLUSTER={% for host in groups['etcd'] %}etcd{{ loop.index|string }}={{ hostvars[host]['etcd_peer_url'] }}{% if not loop.last %},{% endif %}{% endfor %}
+ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}


@@ -0,0 +1,12 @@
# Versions
kubedns_version: 1.7
kubednsmasq_version: 1.3
exechealthz_version: 1.1

# Images
kubedns_image_repo: "gcr.io/google_containers/kubedns-amd64"
kubedns_image_tag: "{{ kubedns_version }}"
kubednsmasq_image_repo: "gcr.io/google_containers/kube-dnsmasq-amd64"
kubednsmasq_image_tag: "{{ kubednsmasq_version }}"
exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
exechealthz_image_tag: "{{ exechealthz_version }}"


@@ -0,0 +1,10 @@
- name: Write calico-policy-controller yaml
  template: src=calico-policy-controller.yml.j2 dest=/etc/kubernetes/calico-policy-controller.yml
  when: inventory_hostname == groups['kube-master'][0]

- name: Start of Calico policy controller
  kube:
    kubectl: "{{bin_dir}}/kubectl"
    filename: /etc/kubernetes/calico-policy-controller.yml
  when: inventory_hostname == groups['kube-master'][0]


@@ -17,3 +17,7 @@
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]
+
+- include: tasks/calico-policy-controller.yml
+  when: enable_network_policy is defined and enable_network_policy == True
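Both new conditionals introduced by this commit hang off the same variable, which is unset by default. A group-vars sketch for switching the feature on:

```
# enables the calico-policy-controller include above and the extra
# --runtime-config apiserver flag added later in this diff
kube_network_plugin: calico
enable_network_policy: true
```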


@@ -0,0 +1,40 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      kubernetes.io/cluster-service: "true"
      k8s-app: calico-policy
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        k8s-app: calico-policy
    spec:
      hostNetwork: true
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:latest
          env:
            - name: ETCD_ENDPOINTS
              value: "{{ etcd_endpoint }}"
            # Location of the Kubernetes API - this shouldn't need to be
            # changed so long as it is used in conjunction with
            # CONFIGURE_ETC_HOSTS="true".
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Configure /etc/hosts within the container to resolve
            # the kubernetes.default Service to the correct clusterIP
            # using the environment provided by the kubelet.
            # This removes the need for KubeDNS to resolve the Service.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"


@@ -21,7 +21,7 @@ spec:
    spec:
      containers:
      - name: kubedns
-        image: gcr.io/google_containers/kubedns-amd64:1.7
+        image: "{{ kubedns_image_repo }}:{{ kubedns_image_tag }}"
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in

@@ -63,7 +63,7 @@ spec:
          name: dns-tcp-local
          protocol: TCP
      - name: dnsmasq
-        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.3
+        image: "{{ kubednsmasq_image_repo }}:{{ kubednsmasq_image_tag }}"
        args:
        - --log-facility=-
        - --cache-size=1000

@@ -77,7 +77,7 @@ spec:
          name: dns-tcp
          protocol: TCP
      - name: healthz
-        image: gcr.io/google_containers/exechealthz-amd64:1.1
+        image: "{{ exechealthz_image_repo }}:{{ exechealthz_image_tag }}"
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:


@@ -10,3 +10,21 @@ kube_users_dir: "{{ kube_config_dir }}/users"

# An experimental dev/test only dynamic volumes provisioner,
# for PetSets. Works for kube>=v1.3 only.
kube_hostpath_dynamic_provisioner: "false"
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
+# This directory is where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernets.
+# This puts them in a sane location.
+# Editting this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
+kube_apiserver_insecure_bind_address: 127.0.0.1
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"


@@ -16,7 +16,7 @@ spec:
    - --etcd-quorum-read=true
    - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
    - --apiserver-count={{ kube_apiserver_count }}
-    - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
+    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
    - --service-cluster-ip-range={{ kube_service_addresses }}
    - --client-ca-file={{ kube_cert_dir }}/ca.pem
    - --basic-auth-file={{ kube_users_dir }}/known_users.csv

@@ -30,6 +30,9 @@ spec:
{% for conf in kube_api_runtime_config %}
    - --runtime-config={{ conf }}
{% endfor %}
+{% endif %}
+{% if enable_network_policy is defined and enable_network_policy == True %}
+    - --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
    - --v={{ kube_log_level | default('2') }}
    - --allow-privileged=true


@@ -1,6 +1,13 @@
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

+# change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
+kube_apiserver_insecure_bind_address: 127.0.0.1
+
+# This is where you can drop yaml/json files and the kubelet will run those
+# pods on startup
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+
dns_domain: "{{ cluster_name }}"

# resolv.conf to base dns config

@@ -14,3 +21,17 @@ kube_proxy_masquerade_all: true
# kube_api_runtime_config:
#   - extensions/v1beta1/daemonsets=true
#   - extensions/v1beta1/deployments=true
+
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+
+# This directory is where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernets.
+# This puts them in a sane location.
+# Editting this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+nginx_image_repo: nginx
+nginx_image_tag: 1.11.4-alpine


@@ -2,4 +2,6 @@
dependencies:
  - role: download
    file: "{{ downloads.hyperkube }}"
+  - role: download
+    file: "{{ downloads.pod_infra }}"
  - role: kubernetes/secrets


@@ -1,6 +1,9 @@
---
- include: install.yml

+- include: nginx-proxy.yml
+  when: is_kube_master == false and loadbalancer_apiserver_localhost|default(false)
+
- name: Write Calico cni config
  template:
    src: "cni-calico.conf.j2"


@@ -0,0 +1,9 @@
---
- name: nginx-proxy | Write static pod
  template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml

- name: nginx-proxy | Make nginx directory
  file: path=/etc/nginx state=directory mode=0700 owner=root

- name: nginx-proxy | Write nginx-proxy configuration
  template: src=nginx.conf.j2 dest="/etc/nginx/nginx.conf" owner=root mode=0755 backup=yes


@@ -1,9 +1,16 @@
{
  "name": "calico-k8s-network",
  "type": "calico",
-  "etcd_authority": "{{ etcd_authority }}",
  "log_level": "info",
  "ipam": {
    "type": "calico-ipam"
+  },
+{% if enable_network_policy is defined and enable_network_policy == True %}
+  "policy": {
+    "type": "k8s"
+  },
+{% endif %}
+  "kubernetes": {
+    "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml"
  }
}


@@ -20,11 +20,11 @@ KUBELET_REGISTER_NODE="--register-node=false"
{% endif %}
# location of the api-server
{% if dns_setup|bool and skip_dnsmasq|bool %}
-KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
+KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% elif dns_setup|bool %}
-KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }}"
+KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% else %}
-KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
+KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d"


@@ -17,6 +17,7 @@ spec:
    - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
{% endif %}
    - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
+    - --cluster-cidr={{ kube_pods_subnet }}
    - --proxy-mode={{ kube_proxy_mode }}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
    - --masquerade-all


@@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-proxy
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: nginx-proxy
    image: {{ nginx_image_repo }}:{{ nginx_image_tag }}
    securityContext:
      privileged: true
    volumeMounts:
    - mountPath: /etc/nginx
      name: etc-nginx
      readOnly: true
  volumes:
  - name: etc-nginx
    hostPath:
      path: /etc/nginx


@@ -0,0 +1,26 @@
error_log stderr notice;

worker_processes auto;
events {
  multi_accept on;
  use epoll;
  worker_connections 1024;
}

stream {
  upstream kube_apiserver {
    least_conn;
    {% for host in groups['kube-master'] -%}
    server {{ hostvars[host]['access_ip'] | default(hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}:{{ kube_apiserver_port }};
    {% endfor %}
  }

  server {
    listen {{ kube_apiserver_port }};
    proxy_pass kube_apiserver;
    proxy_timeout 3s;
    proxy_connect_timeout 1s;
  }
}


@@ -4,6 +4,7 @@ clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
+    server: {{ kube_apiserver_endpoint }}
users:
- name: kubelet
  user:


@@ -21,6 +21,7 @@ kube_log_dir: "/var/log/kubernetes"
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

+epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
+
common_required_pkgs:
  - python-httplib2


@@ -91,7 +91,7 @@
  changed_when: False

- name: Install epel-release on RedHat/CentOS
-  shell: rpm -qa | grep epel-release || rpm -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+  shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
  when: ansible_distribution in ["CentOS","RedHat"] and
        ansible_distribution_major_version >= 7
  changed_when: False


@@ -5,12 +5,12 @@
- set_fact: is_kube_master="{{ inventory_hostname in groups['kube-master'] }}"
- set_fact: first_kube_master="{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
- set_fact:
-    kube_apiserver_insecure_bind_address: |-
-      {% if loadbalancer_apiserver_localhost %}{{ kube_apiserver_address }}{% else %}127.0.0.1{% endif %}
+    loadbalancer_apiserver_localhost: false
+  when: loadbalancer_apiserver is defined
- set_fact:
    kube_apiserver_endpoint: |-
-      {% if loadbalancer_apiserver_localhost -%}
-      http://127.0.0.1:{{ kube_apiserver_insecure_port }}
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+      https://localhost:{{ kube_apiserver_port }}
      {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
      http://127.0.0.1:{{ kube_apiserver_insecure_port }}
      {%- else -%}

@@ -30,7 +30,7 @@
- set_fact:
    etcd_access_addresses: |-
      {% for item in groups['etcd'] -%}
-        http://{{ hostvars[item].etcd_access_address }}:2379{% if not loop.last %},{% endif %}
+        http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2379{% if not loop.last %},{% endif %}
      {%- endfor %}
- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact:

@@ -38,6 +38,11 @@
      {% for host in groups['etcd'] %}
      {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %}
      {% endfor %}
+- set_fact:
+    etcd_peer_addresses: |-
+      {% for item in groups['etcd'] -%}
+        {{ "etcd"+loop.index|string }}=http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
+      {%- endfor %}
- set_fact:
    etcd_proxy_member_name: |-
      {% for host in groups['k8s-cluster'] %}
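For a hypothetical two-node etcd group (10.0.0.10 and 10.0.0.11), the new `etcd_peer_addresses` fact renders to the single string that both etcd env templates earlier in this diff now consume:

```
# hypothetical rendered value:
# etcd_peer_addresses: "etcd1=http://10.0.0.10:2380,etcd2=http://10.0.0.11:2380"
# consumed by etcd.env and etcd-proxy.env as:
# ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
```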


@@ -6,3 +6,16 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"

# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
+
+# This directory is where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernets.
+# This puts them in a sane location.
+# Editting this value will almost surely break something. Don't
+# change it. Things like the systemd scripts are hard coded to
+# look in here. Don't do it.
+kube_config_dir: /etc/kubernetes
+
+# This directory is where all the additional scripts go
+# that Kubernetes normally puts in /srv/kubernetes.
+# This puts them in a sane location
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"


@@ -68,6 +68,7 @@ openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN
openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1
openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
+cat ca.pem >> apiserver.pem

# Nodes and Admin
for i in node admin; do


@@ -27,7 +27,7 @@
    sync_tokens: true
  when: >-
    {%- set tokens = {'sync': False} -%}
-    {%- for server in groups['kube-master']
+    {%- for server in groups['kube-master'] | intersect(play_hosts)
        if (not hostvars[server].known_tokens.stat.exists) or
           (hostvars[server].known_tokens.stat.checksum != known_tokens_master.stat.checksum|default('')) -%}
      {%- set _ = tokens.update({'sync': True}) -%}


@@ -27,31 +27,30 @@
    master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'apiserver-key.pem', 'apiserver.pem']
    node_certs: ['ca.pem', 'node.pem', 'node-key.pem']

-- name: Gen_certs | Get the certs from first master
-  slurp:
-    src: "{{ kube_cert_dir }}/{{ item }}"
-  delegate_to: "{{groups['kube-master'][0]}}"
-  register: slurp_certs
-  with_items: '{{ master_certs + node_certs }}'
-  when: sync_certs|default(false)
-  run_once: true
-  notify: set secret_changed
+- name: Gen_certs | Gather master certs
+  shell: "tar cfz - -C {{ kube_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  register: master_cert_data
+  delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
+  when: sync_certs|default(false)
+
+- name: Gen_certs | Gather node certs
+  shell: "tar cfz - -C {{ kube_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
+  register: node_cert_data
+  delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
+  when: sync_certs|default(false)

- name: Gen_certs | Copy certs on masters
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_certs.results}}'
+  shell: "echo '{{master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+  changed_when: false
  when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | Copy certs on nodes
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_certs.results}}'
-  when: item.item in node_certs and
-        inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
+  shell: "echo '{{node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ kube_cert_dir }}"
+  changed_when: false
+  when: inventory_hostname in groups['kube-node'] and sync_certs|default(false) and
        inventory_hostname != groups['kube-master'][0]

- name: Gen_certs | check certificate permissions
@@ -65,3 +64,30 @@
  shell: chmod 0600 {{ kube_cert_dir}}/*key.pem
  when: inventory_hostname in groups['kube-master']
  changed_when: false
+
+- name: Gen_certs | target ca-certificates directory
+  set_fact:
+    ca_cert_dir: |-
+      {% if ansible_os_family == "Debian" -%}
+      /usr/local/share/ca-certificates
+      {%- elif ansible_os_family == "RedHat" -%}
+      /etc/pki/ca-trust/source/anchors
+      {%- elif ansible_os_family == "CoreOS" -%}
+      /etc/ssl/certs
+      {%- endif %}
+
+- name: Gen_certs | add CA to trusted CA dir
+  copy:
+    src: "{{ kube_cert_dir }}/ca.pem"
+    dest: "{{ ca_cert_dir }}/kube-ca.crt"
+    remote_src: true
+  register: kube_ca_cert
+
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/CoreOS)
+  command: update-ca-certificates
+  when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS"]
+
+- name: Gen_certs | update ca-certificatesa (RedHat)
+  command: update-ca-trust extract
+  when: kube_ca_cert.changed and ansible_os_family == "RedHat"
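The shift from per-file `slurp`/`copy` to a `tar | base64` pipe moves an entire cert set in one delegated task and one registered fact, instead of one slurp result per file fanned out with `with_items`; the cost is that the base64 blob passes through play variables, hence `--wrap=0` so it stays a single line.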


@@ -43,20 +43,15 @@
  delegate_to: "{{groups['kube-master'][0]}}"
  when: sync_tokens|default(false)

-- name: Gen_tokens | Get the tokens from first master
-  slurp:
-    src: "{{ item }}"
-  register: slurp_tokens
-  with_items: '{{tokens_list.stdout_lines}}'
-  run_once: true
-  delegate_to: "{{groups['kube-master'][0]}}"
-  when: sync_tokens|default(false)
-  notify: set secret_changed
+- name: Gen_tokens | Gather tokens
+  shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
+  register: tokens_data
+  delegate_to: "{{groups['kube-master'][0]}}"
+  run_once: true
+  when: sync_tokens|default(false)

- name: Gen_tokens | Copy tokens on masters
-  copy:
-    content: "{{ item.content|b64decode }}"
-    dest: "{{ item.source }}"
-  with_items: '{{slurp_tokens.results}}'
+  shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
+  changed_when: false
  when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and
        inventory_hostname != groups['kube-master'][0]


@@ -11,12 +11,18 @@ DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.{{ dns_domain }}
+DNS.5 = localhost
+{% for host in groups['kube-master'] %}
+DNS.{{ 5 + loop.index }} = {{ host }}
+{% endfor %}
{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
-DNS.5 = {{ apiserver_loadbalancer_domain_name }}
+{% set idx = groups['kube-master'] | length | int + 5 %}
+DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
{% endif %}
{% for host in groups['kube-master'] %}
IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
{% endfor %}
{% set idx = groups['kube-master'] | length | int * 2 + 1 %}
-IP.{{ idx | string }} = {{ kube_apiserver_ip }}
+IP.{{ idx }} = {{ kube_apiserver_ip }}
+IP.{{ idx + 1 }} = 127.0.0.1


@@ -7,4 +7,4 @@ ipip: false

# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.
-overwrite_hyperkube_cni: false
+overwrite_hyperkube_cni: true


@@ -22,16 +22,6 @@
   changed_when: false
   notify: restart calico-node

-- name: Calico | Do not use hyperkube cni if kube_version under v1.3.4
-  set_fact:
-    use_hyperkube_cni: false
-  when: kube_version | version_compare('v1.3.4','<')
-
-- name: Calico | Use hyperkube cni if kube_version above v1.3.4
-  set_fact:
-    use_hyperkube_cni: true
-  when: kube_version | version_compare('v1.3.4','>=')
-
 - name: Calico | Copy cni plugins from hyperkube
   command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/"
   register: cni_task_result
@@ -39,17 +29,16 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  when: "{{ use_hyperkube_cni|bool }}"

 - name: Calico | Install calico cni bin
   command: rsync -pi "{{ local_release_dir }}/calico/bin/calico" "/opt/cni/bin/calico"
   changed_when: false
-  when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
+  when: "{{ overwrite_hyperkube_cni|bool }}"

 - name: Calico | Install calico-ipam cni bin
   command: rsync -pi "{{ local_release_dir }}/calico/bin/calico-ipam" "/opt/cni/bin/calico-ipam"
   changed_when: false
-  when: "{{ not use_hyperkube_cni|bool or overwrite_hyperkube_cni|bool }}"
+  when: "{{ overwrite_hyperkube_cni|bool }}"

 - name: Calico | wait for etcd
   uri: url=http://localhost:2379/health
@@ -90,7 +79,7 @@
   environment:
     NO_DEFAULT_POOLS: true
   run_once: true
-  when: calico_conf.status == 404
+  when: calico_conf.status == 404 or "nodes" not in calico_conf.content
 - name: Calico | Get calico configuration from etcd
   uri:
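
Note: with kube_version now pinned at v1.4.3 (>= v1.3.4), the old version gate is gone and the hyperkube cni plugins are always copied; whether kargo's own calico binaries then overwrite them is controlled solely by overwrite_hyperkube_cni. A minimal sketch of turning that back off, assuming a conventional group_vars layout (path hypothetical):

    # inventory/group_vars/k8s-cluster.yml
    # keep the cni plugins shipped by hyperkube; skip the two rsync tasks above
    overwrite_hyperkube_cni: false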

View file

@@ -9,17 +9,6 @@
   notify:
     - restart docker

-- name: Weave | Determine hyperkube cni to use depending of the version of kube
-  set_fact:
-    use_hyperkube_cni: >
-      {%- if kube_version | version_compare('v1.3.4','>=') -%}
-      true
-      {%- elif kube_version | version_compare('v1.3.4','<') -%}
-      false
-      {%- else -%}
-      {{ ErrorCannotRecognizeVersion }}
-      {%- endif -%}
-
 - name: Weave | Copy cni plugins from hyperkube
   command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
   register: cni_task_result
@@ -27,7 +16,6 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
-  when: "{{ use_hyperkube_cni|bool }}"

 - name: Weave | Install weave
   command: rsync -piu "{{ local_release_dir }}/weave/bin/weave" "{{ bin_dir }}/weave"

View file

@@ -2,11 +2,11 @@
 local_release_dir: /tmp

 # Versions
-include_vars: kube_versions.yml
+kube_version: v1.4.3
 etcd_version: v3.0.6
-calico_version: v0.20.0
-calico_cni_version: v1.3.1
+calico_version: v0.22.0
+calico_cni_version: v1.4.2
 weave_version: v1.6.1

 # Download URL's
@@ -16,8 +16,8 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea
 weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave"

 # Checksums
-calico_cni_checksum: "ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77"
-calico_cni_ipam_checksum: "3df6951a30749c279229e7e318e74ac4e41263996125be65257db7cd25097273"
+calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
+calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
 weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
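
Note: these sha256 values are what the download step compares against after fetching each artifact. A standalone sketch of that verification with stock Ansible, for illustration only (kargo's actual download role is organized differently):

    - name: Download calico-cni plugin and verify its sha256 (illustrative sketch)
      get_url:
        url: "{{ calico_cni_download_url }}"
        dest: "{{ local_release_dir }}/calico/bin/calico"
        checksum: "sha256:{{ calico_cni_checksum }}"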

View file

@@ -1,6 +1,4 @@
 ---
-- include_vars: "kube_versions.yml"
-
 - name: Create dest directories
   file: path={{local_release_dir}}/{{item.dest|dirname}} state=directory recurse=yes
   with_items: '{{downloads}}'

View file

@@ -1 +0,0 @@
-kube_version: v1.3.0

View file

@@ -6,16 +6,10 @@
   vars:
     debug: false
     commands:
-      - name: git_info
-        cmd: find . -type d -name .git -execdir sh -c 'gen-gitinfos.sh global|head -12' \;
       - name: timedate_info
         cmd: timedatectl status
-      - name: space_info
-        cmd: df -h
       - name: kernel_info
         cmd: uname -r
-      - name: distro_info
-        cmd: cat /etc/issue.net
       - name: docker_info
         cmd: docker info
       - name: ip_info
@@ -24,23 +18,26 @@
         cmd: ip ro
       - name: proc_info
         cmd: ps auxf | grep -v ]$
-      - name: systemctl_info
-        cmd: systemctl status
       - name: systemctl_failed_info
         cmd: systemctl --state=failed --no-pager
       - name: k8s_info
         cmd: kubectl get all --all-namespaces -o wide
       - name: errors_info
         cmd: journalctl -p err --utc --no-pager
-      - name: etcd_info
-        cmd: etcdctl --debug cluster-health
     logs:
-      - /var/log/ansible.log
-      - /var/log/ansible/ansible.log
       - /var/log/syslog
       - /var/log/daemon.log
      - /var/log/kern.log
-      - inventory/inventory.ini
-      - cluster.yml
+      - /var/log/dpkg.log
+      - /var/log/apt/history.log
+      - /var/log/yum.log
+      - /var/log/calico/bird/current
+      - /var/log/calico/bird6/current
+      - /var/log/calico/felix/current
+      - /var/log/calico/confd/current

   tasks:
     - name: Storing commands output
@@ -50,7 +47,7 @@
       with_items: "{{commands}}"

     - debug: var=item
-      with_items: output.results
+      with_items: "{{output.results}}"
       when: debug

     - name: Fetch results

View file

@@ -1,39 +0,0 @@
----
-- hosts: localhost
-  become: true
-  gather_facts: no
-  vars:
-    log_path: /var/log/ansible/
-    conf_file: /etc/ansible/ansible.cfg
-    human_readable_plugin: false
-    callback_plugin_path: /usr/share/ansible/plugins/callback
-  tasks:
-    - name: LOGS | ensure log path
-      file: path="{{log_path}}" state=directory owner={{ansible_ssh_user}}
-
-    - name: LOGS | ensure plugin path
-      file: path="{{callback_plugin_path}}" state=directory owner={{ansible_ssh_user}}
-      when: human_readable_plugin
-
-    - name: LOGS | get plugin
-      git: repo=https://gist.github.com/cd706de198c85a8255f6.git dest=/tmp/cd706de198c85a8255f6
-      when: human_readable_plugin
-
-    - name: LOGS | install plugin
-      copy: src=/tmp/cd706de198c85a8255f6/human_log.py dest="{{callback_plugin_path}}"
-      when: human_readable_plugin
-
-    - name: LOGS | config
-      lineinfile:
-        line: "log_path={{log_path}}/ansible.log"
-        regexp: "^#log_path|^log_path"
-        dest: "{{conf_file}}"
-
-    - name: LOGS | callback plugin
-      lineinfile:
-        line: "callback_plugins={{callback_plugin_path}}"
-        regexp: "^#callback_plugins|^callback_plugins"
-        dest: "{{conf_file}}"
-      when: human_readable_plugin

View file

@@ -2,3 +2,6 @@
 pipelining=True

 [defaults]
 host_key_checking=False
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp
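
Note: these three lines make repeated CI runs cheaper: with gathering = smart, hosts whose facts are already cached are not re-gathered, and the jsonfile backend writes one JSON file per inventory hostname under /tmp. If stale facts ever become a problem, a cache timeout can be bounded; this extra line is a suggestion, not part of the change:

    fact_caching_timeout = 86400  ; re-gather facts after 24 h (suggested addition)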

View file

@@ -1,6 +1,6 @@
 ---
 - hosts: localhost
-  sudo: False
+  become: false
   gather_facts: no
   vars:
     cloud_machine_type: g1-small

View file

@@ -1,6 +1,6 @@
 ---
 - hosts: localhost
-  sudo: False
+  become: false
   gather_facts: no
   vars:
     cloud_machine_type: f1-micro

View file

@@ -0,0 +1,43 @@
+---
+- hosts: localhost
+  become: false
+  gather_facts: no
+  vars:
+    expire: 72000
+  tasks:
+    - name: replace_test_id
+      set_fact:
+        test_name: "{{ test_id | regex_replace('\\.', '-') }}"
+
+    - name: Create a bucket
+      gc_storage:
+        bucket: "{{ test_name }}"
+        mode: create
+        expiration: "{{ expire }}"
+        permission: private
+        gs_access_key: gs_key
+        gs_secret_key: gs_skey
+
+    - name: Upload collected diagnostic info
+      gc_storage:
+        bucket: "{{ test_name }}"
+        mode: put
+        permission: private
+        expiration: "{{ expire }}"
+        object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
+        src: logs.tar.gz
+        gs_access_key: gs_key
+        gs_secret_key: gs_skey
+
+    - name: Get a link
+      gc_storage:
+        bucket: "{{ test_name }}"
+        object: "build-{{ test_name }}-{{ kube_network_plugin }}-logs.tar.gz"
+        mode: get_url
+        gs_access_key: gs_key
+        gs_secret_key: gs_skey
+      register: url
+
+    - debug: msg="Download URL {{url}}"

View file

@@ -2,6 +2,16 @@ node1 ansible_ssh_host={{gce.instance_data[0].public_ip}}
 node2 ansible_ssh_host={{gce.instance_data[1].public_ip}}
 node3 ansible_ssh_host={{gce.instance_data[2].public_ip}}

+{% if mode is defined and mode == "separate" %}
+[kube-master]
+node1
+
+[kube-node]
+node2
+
+[etcd]
+node3
+{% else %}
 [kube-master]
 node1
 node2
@@ -14,6 +24,7 @@ node3
 [etcd]
 node1
 node2
+{% endif %}

 [k8s-cluster:children]
 kube-node
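
Note: rendered with mode=separate (the CLUSTER_MODE=separate test cases), the template above produces an inventory along these lines, with each role on its own node (public IPs invented):

    node1 ansible_ssh_host=104.155.0.11
    node2 ansible_ssh_host=104.155.0.12
    node3 ansible_ssh_host=104.155.0.13

    [kube-master]
    node1

    [kube-node]
    node2

    [etcd]
    node3

    [k8s-cluster:children]
    kube-node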