Merge remote-tracking branch 'upstream/master'

Anton Nerozya 2017-06-22 10:53:40 +02:00
commit 11477d31b2
18 changed files with 108 additions and 30 deletions


@@ -256,7 +256,7 @@ before_script:
 .coreos_calico_sep_variables: &coreos_calico_sep_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: calico
-  CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
+  CLOUD_IMAGE: coreos-stable
   CLOUD_REGION: us-west1-b
   CLUSTER_MODE: separate
   BOOTSTRAP_OS: coreos
@@ -296,7 +296,7 @@ before_script:
 .coreos_canal_variables: &coreos_canal_variables
 # stage: deploy-gce-part2
   KUBE_NETWORK_PLUGIN: canal
-  CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
+  CLOUD_IMAGE: coreos-stable
   CLOUD_REGION: us-east1-b
   CLUSTER_MODE: default
   BOOTSTRAP_OS: coreos


@@ -57,10 +57,12 @@ Versions of supported components
 [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
 [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
 [weave](http://weave.works/) v1.8.2 <br>
-[docker](https://www.docker.com/) v1.13.1 <br>
-[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
+[docker](https://www.docker.com/) v1.13.1 (see note)<br>
+[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
-Note: rkt support as docker alternative is limited to control plane (etcd and
+Note: Kubernetes doesn't support newer Docker versions. Among other things, kubelet currently breaks on Docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning (see the pinning sketch below).
+Note 2: rkt support as a docker alternative is limited to the control plane (etcd and
 kubelet). Docker is still used for Kubernetes cluster workloads and network
 plugins' related OS services. Also note, only one of the supported network
 plugins can be deployed for a given single cluster.
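A minimal pinning sketch, assuming a RHEL/CentOS host with the yum versionlock plugin or a Debian/Ubuntu host using apt preferences; the `docker-engine` package name is a placeholder for whatever package your repos actually ship:

```
# RHEL/CentOS: lock the currently installed docker version
yum install -y yum-plugin-versionlock
yum versionlock add docker

# Debian/Ubuntu: pin docker-engine to the supported 1.13.1 series
cat >/etc/apt/preferences.d/docker <<'EOF'
Package: docker-engine
Pin: version 1.13.1*
Pin-Priority: 1001
EOF
```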

Vagrantfile

@@ -7,6 +7,15 @@ Vagrant.require_version ">= 1.8.0"
 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
 COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
 
+SUPPORTED_OS = {
+  "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
+  "coreos-alpha"  => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
+  "coreos-beta"   => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
+  "ubuntu"        => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"},
+}
+
 # Defaults for config options defined in CONFIG
 $num_instances = 3
 $instance_name_prefix = "k8s"
@@ -16,7 +25,7 @@ $vm_cpus = 1
 $shared_folders = {}
 $forwarded_ports = {}
 $subnet = "172.17.8"
-$box = "bento/ubuntu-16.04"
+$os = "ubuntu"
 # The first three nodes are etcd servers
 $etcd_instances = $num_instances
 # The first two nodes are masters
@@ -31,6 +40,7 @@ if File.exist?(CONFIG)
   require CONFIG
 end
 
+$box = SUPPORTED_OS[$os][:box]
 
 # if $inventory is not set, try to use example
 $inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
@@ -56,7 +66,10 @@ Vagrant.configure("2") do |config|
   # always use Vagrants insecure key
   config.ssh.insert_key = false
   config.vm.box = $box
+  if SUPPORTED_OS[$os].has_key? :box_url
+    config.vm.box_url = SUPPORTED_OS[$os][:box_url]
+  end
   config.ssh.username = SUPPORTED_OS[$os][:user]
   # plugin conflict
   if Vagrant.has_plugin?("vagrant-vbguest") then
     config.vbguest.auto_update = false
@@ -103,6 +116,7 @@ Vagrant.configure("2") do |config|
         # Override the default 'calico' with flannel.
         # inventory/group_vars/k8s-cluster.yml
         "kube_network_plugin": "flannel",
+        "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os]
       }
 
       config.vm.network :private_network, ip: ip


@@ -37,12 +37,8 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
 Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
 
-    # allow kube_service_addresses network
-    neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
-    neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
-    # allow kube_pods_subnet network
-    neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
-    neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
+    # allow kube_service_addresses and kube_pods_subnet network
+    neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
+    neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
 
 Now you can finally run the playbook.


@@ -39,3 +39,31 @@ k8s-01 Ready 45s
 k8s-02 Ready 45s
 k8s-03 Ready 45s
 ```
+Customize Vagrant
+=================
+
+You can override the default settings in the `Vagrantfile` either by directly modifying the
+`Vagrantfile` or through an override file.
+
+In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a
+`config.rb` file in it. You can override the variables defined in the `Vagrantfile` by
+providing values in the `vagrant/config.rb` file, e.g.:
+
+    echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
+
+and after `vagrant up` or `vagrant reload`, your host will have port forwarding set up with
+the guest on port 8001. A fuller override example follows at the end of this section.
+
+Use alternative OS for Vagrant
+==============================
+
+By default, Vagrant uses the Ubuntu 16.04 box to provision a local cluster. You may use an
+alternative supported operating system for your local cluster.
+
+Customize the `$os` variable in the `Vagrantfile` or as an override, e.g.:
+
+    echo '$os = "coreos-stable"' >> vagrant/config.rb
+
+The supported operating systems for Vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
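Putting the two overrides together, a hypothetical `vagrant/config.rb` could be built up like this (all values are illustrative; every variable shown has a default near the top of the `Vagrantfile`):

```
# assemble the override file, then provision
echo '$num_instances = 5'                >> vagrant/config.rb
echo '$vm_cpus = 2'                      >> vagrant/config.rb
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
echo '$os = "coreos-stable"'             >> vagrant/config.rb
vagrant up
```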


@@ -31,12 +31,18 @@
     register: etcd_vault_login_result
   when: inventory_hostname == groups.etcd|first
 
+- name: gen_certs_vault | Set fact for vault_client_token
+  set_fact:
+    vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}"
+  delegate_to: "{{ groups['etcd'][0] }}"
+
 - name: gen_certs_vault | Set fact for Vault API token
   set_fact:
     etcd_vault_headers:
       Accept: application/json
       Content-Type: application/json
-      X-Vault-Token: "{{ hostvars[groups.etcd|first]['etcd_vault_login_result']['json']['auth']['client_token'] }}"
+      X-Vault-Token: "{{ vault_client_token }}"
+  when: vault_client_token != ""
 
 # Issue master certs to Etcd nodes
 - include: ../../vault/tasks/shared/issue_cert.yml


@@ -15,5 +15,5 @@
   when: helm_container.changed
 
 - name: Helm | Set up bash completion
-  shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh"
+  shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
   when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )


@@ -105,6 +105,11 @@ spec:
     - mountPath: {{ etcd_cert_dir }}
       name: etcd-certs
       readOnly: true
+{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
+    - mountPath: /etc/ssl/certs/ca-bundle.crt
+      name: rhel-ca-bundle
+      readOnly: true
+{% endif %}
   volumes:
   - hostPath:
       path: {{ kube_config_dir }}
@@ -115,3 +120,8 @@ spec:
   - hostPath:
       path: {{ etcd_cert_dir }}
     name: etcd-certs
+{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
+  - hostPath:
+      path: /etc/ssl/certs/ca-bundle.crt
+    name: rhel-ca-bundle
+{% endif %}


@@ -51,3 +51,9 @@ kubelet_load_modules: false
 
 ## Support custom flags to be passed to kubelet
 kubelet_custom_flags: []
 
+# This setting is used for a rkt-based kubelet for deploying hyperkube
+# from a docker-based registry (controls --insecure and docker://)
+## Empty value for quay.io containers
+## docker for docker registry containers
+kube_hyperkube_image_repo: ""
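As a sketch of how this might be flipped at run time, assuming the repo's usual `cluster.yml` entry point (the inventory path is a placeholder):

```
# "docker" switches the rkt-run kubelet to pull hyperkube via docker://
# with --insecure-options=image (see the service template in the next hunk)
ansible-playbook -i inventory/inventory.cfg cluster.yml \
  -e kube_hyperkube_image_repo=docker
```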


@@ -53,7 +53,12 @@ ExecStart=/usr/bin/rkt run \
         --mount volume=var-lib-kubelet,target=/var/lib/kubelet \
         --mount volume=var-log,target=/var/log \
         --stage1-from-dir=stage1-fly.aci \
+{% if kube_hyperkube_image_repo == "docker" %}
+        --insecure-options=image \
+        docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
+{% else %}
         {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
+{% endif %}
         --uuid-file-save=/var/run/kubelet.uuid \
         --debug --exec=/kubelet -- \
         $KUBE_LOGTOSTDERR \


@@ -9,7 +9,7 @@
   vars:
     sync_file: "{{ item }}"
     sync_file_dir: "{{ kube_cert_dir }}"
-    sync_file_group: "{{ kuber_cert_group }}"
+    sync_file_group: "{{ kube_cert_group }}"
     sync_file_hosts: "{{ groups['k8s-cluster'] }}"
     sync_file_is_cert: true
     sync_file_owner: kube
@@ -29,7 +29,7 @@
   vars:
     sync_file: ca.pem
     sync_file_dir: "{{ kube_cert_dir }}"
-    sync_file_group: "{{ kuber_cert_group }}"
+    sync_file_group: "{{ kube_cert_group }}"
     sync_file_hosts: "{{ groups['k8s-cluster'] }}"
     sync_file_owner: kube


@@ -4,11 +4,16 @@
     failed_when: false
   notify: Flannel | restart docker
 
+# Special cases for Atomic, because it defaults to live-restore: true.
+# We disable live-restore so docker picks up the new flannel IP; after
+# we re-enable it, docker has to be restarted again to pick up the new
+# setting and restore the original behavior.
 - name: Flannel | restart docker
   command: /bin/true
   notify:
     - Flannel | reload systemd
     - Flannel | reload docker.socket
+    - Flannel | configure docker live-restore true (atomic)
     - Flannel | reload docker
     - Flannel | pause while Docker restarts
     - Flannel | wait for docker
@@ -22,6 +27,13 @@
     state: restarted
   when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
 
+- name: Flannel | configure docker live-restore true (atomic)
+  replace:
+    name: /etc/docker/daemon.json
+    regexp: '"live-restore":.*true'
+    replace: '"live-restore": false'
+  when: is_atomic
+
 - name: Flannel | reload docker
   service:
     name: docker
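For context, a rough shell equivalent of the added handler's live-restore flip on an Atomic host; a sketch assuming `live-restore` is currently enabled in `/etc/docker/daemon.json`:

```
# disable live-restore so the following docker restart also restarts
# containers, letting them pick up the new flannel subnet
sed -i 's/"live-restore":[[:space:]]*true/"live-restore": false/' /etc/docker/daemon.json
systemctl daemon-reload
systemctl restart docker
```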


@@ -1,6 +1,6 @@
 ---
 - name: Uncordon node
-  command: "{{ bin_dir }}/kubectl uncordon {{ ansible_hostname }}"
+  command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: needs_cordoning|default(false)


@@ -14,7 +14,7 @@
 {% endif %}
 
 - name: Cordon node
-  command: "{{ bin_dir }}/kubectl cordon {{ ansible_hostname }}"
+  command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: needs_cordoning
@@ -25,6 +25,6 @@
     --ignore-daemonsets
     --grace-period {{ drain_grace_period }}
     --timeout {{ drain_timeout }}
-    --delete-local-data {{ ansible_hostname }}
+    --delete-local-data {{ inventory_hostname }}
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: needs_cordoning


@@ -3,7 +3,7 @@
 - name: bootstrap/ca_trust | pull CA from cert from groups.vault|first
   command: "cat {{ vault_cert_dir }}/ca.pem"
   register: vault_cert_file_cat
-  when: inventory_hostname == groups.vault|first
+  delegate_to: "{{ groups['vault']|first }}"
 
 # This part is mostly stolen from the etcd role
 - name: bootstrap/ca_trust | target ca-certificate store file
@@ -19,7 +19,7 @@
 - name: bootstrap/ca_trust | add CA to trusted CA dir
   copy:
-    content: "{{ hostvars[groups.vault|first]['vault_cert_file_cat']['stdout'] }}"
+    content: "{{ vault_cert_file_cat.get('stdout') }}"
     dest: "{{ ca_cert_path }}"
   register: vault_ca_cert


@@ -12,11 +12,11 @@
 - name: "sync_file | Set facts for file contents"
   set_fact:
-    sync_file_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_cat']['stdout'] }}"
+    sync_file_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_cat', {}).get('stdout') }}"
 
 - name: "sync_file | Set fact for key contents"
   set_fact:
-    sync_file_key_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_key_cat']['stdout'] }}"
+    sync_file_key_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_key_cat', {}).get('stdout') }}"
   when: sync_file_is_cert|d()
 
 - name: "sync_file | Ensure the directory exists"


@@ -36,7 +36,7 @@
   with_items: "{{ sync_file_hosts | unique }}"
   loop_control:
     loop_var: host_item
-  when: hostvars[host_item]["sync_file_stat"]["stat"]["exists"]|bool
+  when: hostvars[host_item].get("sync_file_stat", {}).get("stat", {}).get("exists")
 
 - name: "sync_file | Combine all possible key file sync sources"
   set_fact:
@@ -44,7 +44,7 @@
   with_items: "{{ sync_file_hosts | unique }}"
   loop_control:
     loop_var: host_item
-  when: sync_file_is_cert|d() and hostvars[host_item]["sync_file_key_stat"]["stat"]["exists"]|bool
+  when: sync_file_is_cert|d() and hostvars[host_item].get("sync_file_key_stat", {}).get("stat", {}).get("exists")
 
 - name: "sync_file | Remove sync sources with files that do not match sync_file_srcs|first"
   set_fact:
@@ -52,7 +52,7 @@
   when: >-
     sync_file_srcs|d([])|length > 1 and
     inventory_hostname != sync_file_srcs|first and
-    sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first]["sync_file_stat"]["stat"]["checksum"]
+    sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_stat", {}).get("stat", {}).get("checksum")
 
 - name: "sync_file | Remove sync sources with keys that do not match sync_file_srcs|first"
   set_fact:
@@ -61,7 +61,7 @@
   when: >-
     sync_file_is_cert|d() and
     sync_file_key_srcs|d([])|length > 1 and
     inventory_hostname != sync_file_key_srcs|first and
-    sync_file_key_stat.stat.checksum != hostvars[sync_file_srcs|first]["sync_file_key_stat"]["stat"]["checksum"]
+    sync_file_key_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_key_stat", {}).get("stat", {}).get("checksum")
 
 - name: "sync_file | Consolidate file and key sources"
   set_fact:


@@ -67,7 +67,6 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/master, tags: master }
     - { role: network_plugin, tags: network }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
 
 #Finally handle worker upgrades, based on given batch size
 - hosts: kube-node:!kube-master