diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index be43c4f06..1cd419951 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -256,7 +256,7 @@ before_script:
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: calico
- CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
+ CLOUD_IMAGE: coreos-stable
CLOUD_REGION: us-west1-b
CLUSTER_MODE: separate
BOOTSTRAP_OS: coreos
@@ -296,7 +296,7 @@ before_script:
.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
KUBE_NETWORK_PLUGIN: canal
- CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
+ CLOUD_IMAGE: coreos-stable
CLOUD_REGION: us-east1-b
CLUSTER_MODE: default
BOOTSTRAP_OS: coreos
diff --git a/README.md b/README.md
index 94ba1716d..aa1360a77 100644
--- a/README.md
+++ b/README.md
@@ -57,10 +57,12 @@ Versions of supported components
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
[weave](http://weave.works/) v1.8.2
-[docker](https://www.docker.com/) v1.13.1
-[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0
+[docker](https://www.docker.com/) v1.13.1 (see Note 1)
+[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
-Note: rkt support as docker alternative is limited to control plane (etcd and
+Note 1: Kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
+
+Note 2: rkt support as docker alternative is limited to control plane (etcd and
kubelet). Docker is still used for Kubernetes cluster workloads and network
plugins' related OS services. Also note, only one of the supported network
plugins can be deployed for a given single cluster.
diff --git a/Vagrantfile b/Vagrantfile
index b769199b1..a2c2c1c8f 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -7,6 +7,15 @@ Vagrant.require_version ">= 1.8.0"
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
+COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
+
+SUPPORTED_OS = {
+ "coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
+ "coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
+ "coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
+ "ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "ubuntu"},
+}
+
# Defaults for config options defined in CONFIG
$num_instances = 3
$instance_name_prefix = "k8s"
@@ -16,7 +25,7 @@ $vm_cpus = 1
$shared_folders = {}
$forwarded_ports = {}
$subnet = "172.17.8"
-$box = "bento/ubuntu-16.04"
+$os = "ubuntu"
# The first three nodes are etcd servers
$etcd_instances = $num_instances
# The first two nodes are masters
@@ -31,6 +40,7 @@ if File.exist?(CONFIG)
require CONFIG
end
+$box = SUPPORTED_OS[$os][:box]
# if $inventory is not set, try to use example
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
@@ -56,7 +66,10 @@ Vagrant.configure("2") do |config|
# always use Vagrants insecure key
config.ssh.insert_key = false
config.vm.box = $box
-
+ if SUPPORTED_OS[$os].has_key? :box_url
+ config.vm.box_url = SUPPORTED_OS[$os][:box_url]
+ end
+ config.ssh.username = SUPPORTED_OS[$os][:user]
# plugin conflict
if Vagrant.has_plugin?("vagrant-vbguest") then
config.vbguest.auto_update = false
@@ -103,6 +116,7 @@ Vagrant.configure("2") do |config|
# Override the default 'calico' with flannel.
# inventory/group_vars/k8s-cluster.yml
"kube_network_plugin": "flannel",
+ "bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os]
}
config.vm.network :private_network, ip: ip
diff --git a/docs/openstack.md b/docs/openstack.md
index 1a82133c0..77bb293bf 100644
--- a/docs/openstack.md
+++ b/docs/openstack.md
@@ -37,12 +37,8 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
- # allow kube_service_addresses network
- neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
- neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
-
- # allow kube_pods_subnet network
- neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
- neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
+ # allow kube_service_addresses and kube_pods_subnet networks
+ neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
+ neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
Now you can finally run the playbook.
diff --git a/docs/vagrant.md b/docs/vagrant.md
index 02132c140..1b0073799 100644
--- a/docs/vagrant.md
+++ b/docs/vagrant.md
@@ -39,3 +39,31 @@ k8s-01 Ready 45s
k8s-02 Ready 45s
k8s-03 Ready 45s
```
+
+Customize Vagrant
+=================
+
+You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
+or through an override file.
+
+In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create a `config.rb` file in it.
+
+You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
+e.g.:
+
+ echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
+
+and after `vagrant up` or `vagrant reload`, your host will have port forwarding set up with the guest on port 8001.
+
+Use alternative OS for Vagrant
+==============================
+
+By default, Vagrant uses the Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
+operating system for your local cluster.
+
+Customize the `$os` variable in the `Vagrantfile`, or as an override, e.g.:
+
+ echo '$os = "coreos-stable"' >> vagrant/config.rb
+
+
+The supported operating systems for Vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml
index e45b2d02d..b0dbb1a4a 100644
--- a/roles/etcd/tasks/gen_certs_vault.yml
+++ b/roles/etcd/tasks/gen_certs_vault.yml
@@ -31,12 +31,18 @@
register: etcd_vault_login_result
when: inventory_hostname == groups.etcd|first
+- name: gen_certs_vault | Set fact for vault_client_token
+ set_fact:
+ vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}"
+ delegate_to: "{{ groups['etcd'][0] }}"
+
- name: gen_certs_vault | Set fact for Vault API token
set_fact:
etcd_vault_headers:
Accept: application/json
Content-Type: application/json
- X-Vault-Token: "{{ hostvars[groups.etcd|first]['etcd_vault_login_result']['json']['auth']['client_token'] }}"
+ X-Vault-Token: "{{ vault_client_token }}"
+ when: vault_client_token != ""
# Issue master certs to Etcd nodes
- include: ../../vault/tasks/shared/issue_cert.yml
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 1d50f8b9b..f12875da2 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -15,5 +15,5 @@
when: helm_container.changed
- name: Helm | Set up bash completion
- shell: "umask 022 && {{ bin_dir }}/helm completion >/etc/bash_completion.d/helm.sh"
+ shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
when: ( helm_container.changed and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] )
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index b0f1a2f53..851cca060 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -105,6 +105,11 @@ spec:
- mountPath: {{ etcd_cert_dir }}
name: etcd-certs
readOnly: true
+{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
+ - mountPath: /etc/ssl/certs/ca-bundle.crt
+ name: rhel-ca-bundle
+ readOnly: true
+{% endif %}
volumes:
- hostPath:
path: {{ kube_config_dir }}
@@ -115,3 +120,8 @@ spec:
- hostPath:
path: {{ etcd_cert_dir }}
name: etcd-certs
+{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
+ - hostPath:
+ path: /etc/ssl/certs/ca-bundle.crt
+ name: rhel-ca-bundle
+{% endif %}
\ No newline at end of file
diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 7ef6d01e0..4e34dcc99 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -51,3 +51,9 @@ kubelet_load_modules: false
##Support custom flags to be passed to kubelet
kubelet_custom_flags: []
+
+# This setting is used for rkt-based kubelet for deploying hyperkube
+# from a docker-based registry (controls --insecure and docker://)
+## Empty value for quay.io containers
+## docker for docker registry containers
+kube_hyperkube_image_repo: ""
diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
index 5f8351458..2c889d8c6 100644
--- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2
+++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2
@@ -53,7 +53,12 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \
--stage1-from-dir=stage1-fly.aci \
+{% if kube_hyperkube_image_repo == "docker" %}
+ --insecure-options=image \
+ docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
+{% else %}
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
+{% endif %}
--uuid-file-save=/var/run/kubelet.uuid \
--debug --exec=/kubelet -- \
$KUBE_LOGTOSTDERR \
diff --git a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml
index 9d6deb563..884f6c436 100644
--- a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml
+++ b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml
@@ -9,7 +9,7 @@
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ kube_cert_dir }}"
- sync_file_group: "{{ kuber_cert_group }}"
+ sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['k8s-cluster'] }}"
sync_file_is_cert: true
sync_file_owner: kube
@@ -29,7 +29,7 @@
vars:
sync_file: ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
- sync_file_group: "{{ kuber_cert_group }}"
+ sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['k8s-cluster'] }}"
sync_file_owner: kube
diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml
index 98c93a53a..bd4058976 100644
--- a/roles/network_plugin/flannel/handlers/main.yml
+++ b/roles/network_plugin/flannel/handlers/main.yml
@@ -4,11 +4,16 @@
failed_when: false
notify: Flannel | restart docker
+# special case for atomic because it defaults to live-restore: true
+# So we disable live-restore to pickup the new flannel IP. After
+# we enable it, we have to restart docker again to pickup the new
+# setting and restore the original behavior
- name: Flannel | restart docker
command: /bin/true
notify:
- Flannel | reload systemd
- Flannel | reload docker.socket
+ - Flannel | configure docker live-restore true (atomic)
- Flannel | reload docker
- Flannel | pause while Docker restarts
- Flannel | wait for docker
@@ -22,6 +27,13 @@
state: restarted
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+- name: Flannel | configure docker live-restore true (atomic)
+ replace:
+ name: /etc/docker/daemon.json
+ regexp: '"live-restore":.*true'
+ replace: '"live-restore": false'
+ when: is_atomic
+
- name: Flannel | reload docker
service:
name: docker
diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml
index bff9983ff..c32f42491 100644
--- a/roles/upgrade/post-upgrade/tasks/main.yml
+++ b/roles/upgrade/post-upgrade/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Uncordon node
- command: "{{ bin_dir }}/kubectl uncordon {{ ansible_hostname }}"
+ command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}"
when: needs_cordoning|default(false)
diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml
index f2251375b..a2b34927f 100644
--- a/roles/upgrade/pre-upgrade/tasks/main.yml
+++ b/roles/upgrade/pre-upgrade/tasks/main.yml
@@ -14,7 +14,7 @@
{% endif %}
- name: Cordon node
- command: "{{ bin_dir }}/kubectl cordon {{ ansible_hostname }}"
+ command: "{{ bin_dir }}/kubectl cordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}"
when: needs_cordoning
@@ -25,6 +25,6 @@
--ignore-daemonsets
--grace-period {{ drain_grace_period }}
--timeout {{ drain_timeout }}
- --delete-local-data {{ ansible_hostname }}
+ --delete-local-data {{ inventory_hostname }}
delegate_to: "{{ groups['kube-master'][0] }}"
when: needs_cordoning
diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml
index 57e25610b..63ab256d5 100644
--- a/roles/vault/tasks/bootstrap/ca_trust.yml
+++ b/roles/vault/tasks/bootstrap/ca_trust.yml
@@ -3,7 +3,7 @@
- name: bootstrap/ca_trust | pull CA from cert from groups.vault|first
command: "cat {{ vault_cert_dir }}/ca.pem"
register: vault_cert_file_cat
- when: inventory_hostname == groups.vault|first
+ delegate_to: "{{ groups['vault']|first }}"
# This part is mostly stolen from the etcd role
- name: bootstrap/ca_trust | target ca-certificate store file
@@ -19,7 +19,7 @@
- name: bootstrap/ca_trust | add CA to trusted CA dir
copy:
- content: "{{ hostvars[groups.vault|first]['vault_cert_file_cat']['stdout'] }}"
+ content: "{{ vault_cert_file_cat.get('stdout') }}"
dest: "{{ ca_cert_path }}"
register: vault_ca_cert
diff --git a/roles/vault/tasks/shared/sync.yml b/roles/vault/tasks/shared/sync.yml
index 02818b5f1..bbfedbc4c 100644
--- a/roles/vault/tasks/shared/sync.yml
+++ b/roles/vault/tasks/shared/sync.yml
@@ -12,11 +12,11 @@
- name: "sync_file | Set facts for file contents"
set_fact:
- sync_file_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_cat']['stdout'] }}"
+ sync_file_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_cat', {}).get('stdout') }}"
- name: "sync_file | Set fact for key contents"
set_fact:
- sync_file_key_contents: "{{ hostvars[sync_file_srcs|first]['sync_file_key_cat']['stdout'] }}"
+ sync_file_key_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_key_cat', {}).get('stdout') }}"
when: sync_file_is_cert|d()
- name: "sync_file | Ensure the directory exists"
diff --git a/roles/vault/tasks/shared/sync_file.yml b/roles/vault/tasks/shared/sync_file.yml
index 484d4aced..ef53e9d90 100644
--- a/roles/vault/tasks/shared/sync_file.yml
+++ b/roles/vault/tasks/shared/sync_file.yml
@@ -36,7 +36,7 @@
with_items: "{{ sync_file_hosts | unique }}"
loop_control:
loop_var: host_item
- when: hostvars[host_item]["sync_file_stat"]["stat"]["exists"]|bool
+ when: hostvars[host_item].get("sync_file_stat", {}).get("stat", {}).get("exists")
- name: "sync_file | Combine all possible key file sync sources"
set_fact:
@@ -44,7 +44,7 @@
with_items: "{{ sync_file_hosts | unique }}"
loop_control:
loop_var: host_item
- when: sync_file_is_cert|d() and hostvars[host_item]["sync_file_key_stat"]["stat"]["exists"]|bool
+ when: sync_file_is_cert|d() and hostvars[host_item].get("sync_file_key_stat", {}).get("stat", {}).get("exists")
- name: "sync_file | Remove sync sources with files that do not match sync_file_srcs|first"
set_fact:
@@ -52,7 +52,7 @@
when: >-
sync_file_srcs|d([])|length > 1 and
inventory_hostname != sync_file_srcs|first and
- sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first]["sync_file_stat"]["stat"]["checksum"]
+ sync_file_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_stat", {}).get("stat", {}).get("checksum")
- name: "sync_file | Remove sync sources with keys that do not match sync_file_srcs|first"
set_fact:
@@ -61,7 +61,7 @@
sync_file_is_cert|d() and
sync_file_key_srcs|d([])|length > 1 and
inventory_hostname != sync_file_key_srcs|first and
- sync_file_key_stat.stat.checksum != hostvars[sync_file_srcs|first]["sync_file_key_stat"]["stat"]["checksum"]
+ sync_file_key_stat.stat.get("checksum") != hostvars[sync_file_srcs|first].get("sync_file_key_stat", {}).get("stat", {}).get("checksum")
- name: "sync_file | Consolidate file and key sources"
set_fact:
diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml
index 8ff4ad4b8..0b4613820 100644
--- a/upgrade-cluster.yml
+++ b/upgrade-cluster.yml
@@ -67,7 +67,6 @@
- { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master }
- { role: network_plugin, tags: network }
- - { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master