From e22759d8f01078ad29ce09d3f57665508abc2d09 Mon Sep 17 00:00:00 2001
From: Cornelius Keller
Date: Wed, 24 Jan 2018 09:43:06 +0100
Subject: [PATCH 001/177] fix nodePort for weave

---
 roles/network_plugin/weave/tasks/main.yml | 7 +++++++
 .../weave/templates/weavenet.conflist.j2 | 16 ++++++++++++++++
 2 files changed, 23 insertions(+)
 create mode 100644 roles/network_plugin/weave/templates/weavenet.conflist.j2

diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index 30c209f8b..d843e4545 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -2,6 +2,13 @@
 - include: seed.yml
   when: weave_mode_seed
+
+- name: template weavenet conflist
+  template:
+    src: weavenet.conflist.j2
+    dest: /etc/cni/net.d/00-weave.conflist
+    owner: kube
+
 - name: Weave | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
   register: cni_task_result
diff --git a/roles/network_plugin/weave/templates/weavenet.conflist.j2 b/roles/network_plugin/weave/templates/weavenet.conflist.j2
new file mode 100644
index 000000000..45ae0b967
--- /dev/null
+++ b/roles/network_plugin/weave/templates/weavenet.conflist.j2
@@ -0,0 +1,16 @@
+{
+  "cniVersion": "0.3.0",
+  "name": "mynet",
+  "plugins": [
+    {
+      "name": "weave",
+      "type": "weave-net",
+      "hairpinMode": true
+    },
+    {
+      "type": "portmap",
+      "capabilities": {"portMappings": true},
+      "snat": true
+    }
+  ]
+}

From f8a59446e87a436de851a928c0ee0706f33e28dd Mon Sep 17 00:00:00 2001
From: Damian Nowak
Date: Fri, 9 Feb 2018 10:39:40 -0600
Subject: [PATCH 002/177] Enable OOM killing

When etcd exceeds its memory limit, it becomes useless but keeps running.
We should let the OOM killer kill the etcd process in the container, so
systemd can spot the problem and restart etcd according to the "Restart"
setting in the etcd.service unit file. If the OOM problem keeps repeating,
i.e. it happens on every single restart, systemd will eventually back off
and stop restarting it anyway.

--restart=on-failure:5 in this file has no effect because a memory
allocation error doesn't by itself cause the process to die.

Related: https://github.com/kubernetes-incubator/kubespray/blob/master/roles/etcd/templates/etcd-docker.service.j2

This partially reverts a change introduced in #1860.
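For illustration, the systemd restart behavior relied on here looks roughly
like the following in a unit file (a sketch only, not the exact contents of
etcd-docker.service.j2 linked above):

```
[Service]
Restart=on-failure
RestartSec=5
# If the service keeps dying in quick succession, systemd's start rate
# limiting (StartLimitBurst / StartLimitIntervalSec) eventually gives up
# and leaves the unit in a failed state instead of restarting it forever.
```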
--- roles/etcd/templates/etcd.j2 | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2 index d916a7570..9ac08e073 100644 --- a/roles/etcd/templates/etcd.j2 +++ b/roles/etcd/templates/etcd.j2 @@ -9,7 +9,6 @@ {% if etcd_memory_limit is defined %} --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ {% endif %} - --oom-kill-disable \ {% if etcd_cpu_limit is defined %} --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ {% endif %} From 911af3f3315b38cf40942f9a96cf66ee1f3366e6 Mon Sep 17 00:00:00 2001 From: Merouane Atig Date: Tue, 13 Feb 2018 16:02:50 +0100 Subject: [PATCH 003/177] Fix link markdown markup --- docs/large-deployments.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/large-deployments.md b/docs/large-deployments.md index b19f69913..953ca2963 100644 --- a/docs/large-deployments.md +++ b/docs/large-deployments.md @@ -3,8 +3,7 @@ Large deployments of K8s For a large scaled deployments, consider the following configuration changes: -* Tune [ansible settings] - (http://docs.ansible.com/ansible/intro_configuration.html) +* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html) for `forks` and `timeout` vars to fit large numbers of nodes being deployed. * Override containers' `foo_image_repo` vars to point to intranet registry. From f90e509bf6e2af24f8a3fc8aa056ca3f6b4fe097 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Pi=C4=85tkowski?= Date: Wed, 14 Feb 2018 15:43:41 +0000 Subject: [PATCH 004/177] gather facts for all nodes, even if running for single one (--limit) --- cluster.yml | 6 ++++++ upgrade-cluster.yml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/cluster.yml b/cluster.yml index 995e36309..7e4ff3f70 100644 --- a/cluster.yml +++ b/cluster.yml @@ -21,6 +21,12 @@ vars: ansible_ssh_pipelining: true gather_facts: true + pre_tasks: + - name: gather facts from all instances + setup: + delegate_to: "{{item}}" + delegate_facts: True + with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}" - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index d279f5635..88969436b 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -21,6 +21,12 @@ vars: ansible_ssh_pipelining: true gather_facts: true + pre_tasks: + - name: gather facts from all instances + setup: + delegate_to: "{{item}}" + delegate_facts: True + with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}" - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" From 4c280e59d43d237706dfb6279493d3289edb5da9 Mon Sep 17 00:00:00 2001 From: Miouge1 Date: Fri, 16 Feb 2018 13:43:35 +0100 Subject: [PATCH 005/177] Use legacy policy config to apply the scheduler policy --- .../master/templates/manifests/kube-scheduler.manifest.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index b13fc7fa3..e42be474b 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -29,6 +29,7 @@ spec: - --leader-elect=true - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml {% if volume_cross_zone_attachment %} + - --use-legacy-policy-config - --policy-config-file={{ 
kube_config_dir }}/kube-scheduler-policy.yaml {% endif %} - --profiling=false From 95e2bde15bd86b9607e98529b6e4eaf4d88642c4 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Fri, 16 Feb 2018 16:20:08 -0500 Subject: [PATCH 006/177] set nodeName to "{{ inventory_hostname }}" in kubeadm-config --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index ed1cc7add..dc842a5e6 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -83,3 +83,5 @@ apiServerCertSANs: {% endfor %} certificatesDir: {{ kube_config_dir }}/ssl unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" +nodeName: {{ inventory_hostname }} + From deef47c923c44069d154efc9f174cb3f30669594 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Thu, 15 Feb 2018 09:55:43 +0800 Subject: [PATCH 007/177] Upgrade Local Volume Provisioner Addon to v2.0.0 --- cluster.yml | 1 + inventory/sample/group_vars/k8s-cluster.yml | 8 ++- .../local_volume_provisioner/README.md | 59 ++++++++++--------- .../defaults/main.yml | 8 +++ .../local_volume_provisioner/tasks/main.yml | 24 ++++---- ...ume-provisioner-clusterrolebinding.yml.j2} | 21 +++---- .../local-volume-provisioner-cm.yml.j2 | 11 ++++ .../local-volume-provisioner-ds.yml.j2 | 45 ++++++++++++++ .../local-volume-provisioner-ns.yml.j2 | 5 ++ .../local-volume-provisioner-sa.yml.j2 | 6 ++ .../local-volume-provisioner-sc.yml.j2 | 7 +++ .../external_provisioner/meta/main.yml | 8 +++ .../defaults/main.yml | 6 -- .../templates/configmap.yml.j2 | 14 ----- .../templates/daemonset.yml.j2 | 45 -------------- .../templates/serviceaccount.yml.j2 | 5 -- roles/kubernetes-apps/meta/main.yml | 7 --- .../node/templates/kubelet-container.j2 | 2 +- .../node/templates/kubelet.rkt.service.j2 | 4 +- roles/kubernetes/preinstall/tasks/main.yml | 10 +++- roles/kubespray-defaults/defaults/main.yaml | 13 ++-- 21 files changed, 170 insertions(+), 139 deletions(-) rename docs/local-storage-provisioner.md => roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md (64%) create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml rename roles/kubernetes-apps/{ => external_provisioner}/local_volume_provisioner/tasks/main.yml (52%) rename roles/kubernetes-apps/{local_volume_provisioner/templates/clusterrolebinding.yml.j2 => external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2} (51%) create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 create mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 create mode 100644 roles/kubernetes-apps/external_provisioner/meta/main.yml delete mode 100644 roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml delete mode 100644 
roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2 delete mode 100644 roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2 delete mode 100644 roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2 diff --git a/cluster.yml b/cluster.yml index 995e36309..b388c9ced 100644 --- a/cluster.yml +++ b/cluster.yml @@ -94,6 +94,7 @@ - { role: kubespray-defaults} - { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/policy_controller, tags: policy-controller } + - { role: kubernetes-apps/external_provisioner, tags: external-provisioner } - hosts: calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index cf4f08a89..8ea6e27ca 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -169,9 +169,11 @@ istio_enabled: false registry_enabled: false # Local volume provisioner deployment -# deprecated will be removed -local_volumes_enabled: false -local_volume_provisioner_enabled: "{{ local_volumes_enabled }}" +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: "{{ system_namespace }}" +# local_volume_provisioner_base_dir: /mnt/disks +# local_volume_provisioner_mount_dir: /local-disks +# local_volume_provisioner_storage_class: local-storage # CephFS provisioner deployment cephfs_provisioner_enabled: false diff --git a/docs/local-storage-provisioner.md b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md similarity index 64% rename from docs/local-storage-provisioner.md rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md index 9895cc473..458a483cb 100644 --- a/docs/local-storage-provisioner.md +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/README.md @@ -1,58 +1,62 @@ -# Local Storage Provisioner +Local Storage Provisioner +========================= The local storage provisioner is NOT a dynamic storage provisioner as you would expect from a cloud provider. Instead, it simply creates PersistentVolumes for -all manually created volumes located in the directory `local_volume_base_dir`. +all manually created volumes located in the directory `local_volume_provisioner_base_dir`. The default path is /mnt/disks and the rest of this doc will use that path as an example. -## Examples to create local storage volumes +Examples to create local storage volumes +---------------------------------------- ### tmpfs method: - ``` - for vol in vol1 vol2 vol3; do - mkdir /mnt/disks/$vol - mount -t tmpfs -o size=5G $vol /mnt/disks/$vol - done - ``` +``` bash +for vol in vol1 vol2 vol3; do +mkdir /mnt/disks/$vol +mount -t tmpfs -o size=5G $vol /mnt/disks/$vol +done +``` The tmpfs method is not recommended for production because the mount is not persistent and data will be deleted on reboot. ### Mount physical disks - ``` - mkdir /mnt/disks/ssd1 - mount /dev/vdb1 /mnt/disks/ssd1 - ``` +``` bash +mkdir /mnt/disks/ssd1 +mount /dev/vdb1 /mnt/disks/ssd1 +``` Physical disks are recommended for production environments because it offers complete isolation in terms of I/O and capacity. 
### File-backed sparsefile method - ``` - truncate /mnt/disks/disk5 --size 2G - mkfs.ext4 /mnt/disks/disk5 - mkdir /mnt/disks/vol5 - mount /mnt/disks/disk5 /mnt/disks/vol5 - ``` +``` bash +truncate /mnt/disks/disk5 --size 2G +mkfs.ext4 /mnt/disks/disk5 +mkdir /mnt/disks/vol5 +mount /mnt/disks/disk5 /mnt/disks/vol5 +``` If you have a development environment and only one disk, this is the best way to limit the quota of persistent volumes. ### Simple directories - ``` - for vol in vol6 vol7 vol8; do - mkdir /mnt/disks/$vol - done - ``` + +``` bash +for vol in vol6 vol7 vol8; do +mkdir /mnt/disks/$vol +done +``` This is also acceptable in a development environment, but there is no capacity management. -## Usage notes +Usage notes +----------- The volume provisioner cannot calculate volume sizes correctly, so you should delete the daemonset pod on the relevant host after creating volumes. The pod @@ -62,6 +66,7 @@ Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for CoreOS/Container Linux). Pods with persistent volume claims will not be able to start if the mounts become unavailable. -## Further reading +Further reading +--------------- -Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume +Refer to the upstream docs here: diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml new file mode 100644 index 000000000..dd2e8a147 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -0,0 +1,8 @@ +--- +local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner +local_volume_provisioner_image_tag: v2.0.0 + +local_volume_provisioner_namespace: "{{ system_namespace }}" +local_volume_provisioner_base_dir: /mnt/disks +local_volume_provisioner_mount_dir: /mnt/disks +local_volume_provisioner_storage_class: local-storage diff --git a/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml similarity index 52% rename from roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index 9766ea27c..b83e45a20 100644 --- a/roles/kubernetes-apps/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -1,8 +1,9 @@ --- + - name: Local Volume Provisioner | Ensure base dir is created on all hosts file: - path: "{{ local_volume_base_dir }}" - ensure: directory + path: "{{ local_volume_provisioner_base_dir }}" + state: directory owner: root group: root mode: 0700 @@ -13,31 +14,32 @@ - name: Local Volume Provisioner | Create addon dir file: path: "{{ kube_config_dir }}/addons/local_volume_provisioner" + state: directory owner: root group: root mode: 0755 - recurse: true - name: Local Volume Provisioner | Create manifests template: src: "{{ item.file }}.j2" dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}" with_items: - - { name: local-volume-serviceaccount, file: serviceaccount.yml, type, serviceaccount } - - { name: local-volume-clusterrolebinding, file: clusterrolebinding.yml, type, clusterrolebinding } - - { name: local-volume-configmap, file: configmap.yml, type, configmap } - - { name: local-volume-daemonset, file: daemonset.yml, 
type, daemonset }
-  register: local_volume_manifests
+    - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
+    - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
+    - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm }
+    - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds }
+    - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc }
+  register: local_volume_provisioner_manifests
   when: inventory_hostname == groups['kube-master'][0]
-
 - name: Local Volume Provisioner | Apply manifests
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "{{ local_volume_provisioner_namespace }}"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
     state: "latest"
-  with_items: "{{ local_volume_manifests.results }}"
+  with_items: "{{ local_volume_provisioner_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
similarity index 51%
rename from roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2
rename to roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
index 5097d2607..ab98f1f55 100644
--- a/roles/kubernetes-apps/local_volume_provisioner/templates/clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2
@@ -1,27 +1,28 @@
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
-  name: local-storage-provisioner-pv-binding
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner-system-persistent-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
 subjects:
 - kind: ServiceAccount
-  name: local-storage-admin
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
 roleRef:
   kind: ClusterRole
   name: system:persistent-volume-provisioner
   apiGroup: rbac.authorization.k8s.io
+
 ---
-kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
 metadata:
-  name: local-storage-provisioner-node-binding
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner-system-node
+  namespace: {{ local_volume_provisioner_namespace }}
 subjects:
 - kind: ServiceAccount
-  name: local-storage-admin
-  namespace: {{ system_namespace }}
+  name: local-volume-provisioner
+  namespace: {{ local_volume_provisioner_namespace }}
 roleRef:
   kind: ClusterRole
   name: system:node
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2
new file mode 100644
index 000000000..8ad76ab2d
--- /dev/null
+++
b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-cm.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} +data: + storageClassMap: | + {{ local_volume_provisioner_storage_class }}: + hostDir: {{ local_volume_provisioner_base_dir }} + mountDir: {{ local_volume_provisioner_mount_dir }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 new file mode 100644 index 000000000..80a74f5f1 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 @@ -0,0 +1,45 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} +spec: + selector: + matchLabels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + template: + metadata: + labels: + k8s-app: local-volume-provisioner + version: {{ local_volume_provisioner_image_tag }} + spec: + serviceAccountName: local-volume-provisioner + containers: + - name: provisioner + image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + securityContext: + privileged: true + env: + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: local-volume-provisioner + mountPath: /etc/provisioner/config + readOnly: true + - name: local-volume-provisioner-hostpath-mnt-disks + mountPath: {{ local_volume_provisioner_mount_dir }} + volumes: + - name: local-volume-provisioner + configMap: + name: local-volume-provisioner + - name: local-volume-provisioner-hostpath-mnt-disks + hostPath: + path: {{ local_volume_provisioner_base_dir }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 new file mode 100644 index 000000000..68faacfbc --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ local_volume_provisioner_namespace }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 new file mode 100644 index 000000000..c78a16b60 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: local-volume-provisioner + namespace: {{ local_volume_provisioner_namespace }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 new file mode 100644 index 000000000..bf1f00262 --- 
/dev/null +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-sc.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: {{ local_volume_provisioner_storage_class }} +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml new file mode 100644 index 000000000..3daa461d8 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: kubernetes-apps/external_provisioner/local_volume_provisioner + when: local_volume_provisioner_enabled + tags: + - apps + - local-volume-provisioner + - external-provisioner diff --git a/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml deleted file mode 100644 index d1e1d1d69..000000000 --- a/roles/kubernetes-apps/local_volume_provisioner/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap -local_volume_provisioner_bootstrap_image_tag: v1.0.1 - -local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner -local_volume_provisioner_image_tag: v1.0.1 diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2 deleted file mode 100644 index fd8a7a637..000000000 --- a/roles/kubernetes-apps/local_volume_provisioner/templates/configmap.yml.j2 +++ /dev/null @@ -1,14 +0,0 @@ ---- -# The config map is used to configure local volume discovery for Local SSDs on GCE and GKE. -# It is a map from storage class to its mount configuration. 
-kind: ConfigMap -apiVersion: v1 -metadata: - name: local-volume-config - namespace: {{ system_namespace }} -data: - "{{ local_volume_storage_class }}": | - { - "hostDir": "{{ local_volume_base_dir }}", - "mountDir": "{{ local_volume_mount_dir }}" - } diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2 deleted file mode 100644 index 6ffe5e36b..000000000 --- a/roles/kubernetes-apps/local_volume_provisioner/templates/daemonset.yml.j2 +++ /dev/null @@ -1,45 +0,0 @@ ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: local-volume-provisioner - namespace: "{{ system_namespace }}" -spec: - template: - metadata: - labels: - app: local-volume-provisioner - spec: - containers: - - name: provisioner - image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }} - imagePullPolicy: {{ k8s_image_pull_policy }} - securityContext: - privileged: true - volumeMounts: - - name: discovery-vol - mountPath: "{{ local_volume_mount_dir }}" - - name: local-volume-config - mountPath: /etc/provisioner/config/ - env: - - name: MY_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: MY_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: VOLUME_CONFIG_NAME - value: "local-volume-config" - volumes: - - name: discovery-vol - hostPath: - path: "{{ local_volume_base_dir }}" - - configMap: - defaultMode: 420 - name: local-volume-config - name: local-volume-config - serviceAccount: local-storage-admin diff --git a/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2 b/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2 deleted file mode 100644 index 182248a6a..000000000 --- a/roles/kubernetes-apps/local_volume_provisioner/templates/serviceaccount.yml.j2 +++ /dev/null @@ -1,5 +0,0 @@ ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: local-storage-admin diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index 4f657bd27..fa3b1f1a6 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -27,13 +27,6 @@ dependencies: - apps - registry - - role: kubernetes-apps/local_volume_provisioner - when: local_volume_provisioner_enabled - tags: - - apps - - local_volume_provisioner - - storage - - role: kubernetes-apps/cephfs_provisioner when: cephfs_provisioner_enabled tags: diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 6549a7044..28a109ec1 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -27,7 +27,7 @@ -v {{kube_config_dir}}:{{kube_config_dir}}:ro \ -v /etc/os-release:/etc/os-release:ro \ {% if local_volume_provisioner_enabled == true %} - -v {{ local_volume_base_dir }}:{{ local_volume_base_dir }}:shared \ + -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:shared \ {% endif %} {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \ ./hyperkube kubelet \ diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index e1406e7e1..80825fab3 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -37,7 +37,7 @@ ExecStart=/usr/bin/rkt run \ 
--volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \ --volume var-log,kind=host,source=/var/log \ {% if local_volume_provisioner_enabled == true %} - --volume local-volume-base-dir,kind=host,source={{ local_volume_base_dir }},readOnly=false,recursive=true \ + --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false,recursive=true \ {% endif %} {% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"] %} --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ @@ -68,7 +68,7 @@ ExecStart=/usr/bin/rkt run \ --mount volume=var-log,target=/var/log \ --mount volume=hosts,target=/etc/hosts \ {% if local_volume_provisioner_enabled == true %} - --mount volume=local-volume-base-dir,target={{ local_volume_base_dir }} \ + --mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \ {% endif %} {% if kubelet_flexvolumes_plugins_dir is defined %} --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 289065c71..76f9bfde4 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -60,7 +60,15 @@ - "{{ kube_config_dir }}/ssl" - "{{ kube_manifest_dir }}" - "{{ kube_script_dir }}" - - "{{ local_volume_base_dir }}" + +- name: Create local volume provisioner base dir + file: + path: "{{ local_volume_provisioner_base_dir }}" + state: directory + owner: kube + when: + - inventory_hostname in groups['k8s-cluster'] + - local_volume_provisioner_enabled == true - name: check cloud_provider value fail: diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 6883370ee..d85265eca 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -166,15 +166,10 @@ helm_enabled: false istio_enabled: false registry_enabled: false enable_network_policy: false -local_volume_provisioner_enabled: false +local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}" persistent_volumes_enabled: false cephfs_provisioner_enabled: false -# Base path for local volume provisioner addon -local_volume_base_dir: /mnt/disks -local_volume_mount_dir: /local-disks -local_volume_storage_class: local-storage - ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) # openstack_blockstorage_version: "v1/v2/auto (default)" ## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. @@ -199,7 +194,11 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}" ## List of key=value pairs that describe feature gates for ## the k8s cluster. -kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volume_provisioner_enabled|string }}'] +kube_feature_gates: + - "Initializers={{ istio_enabled | string }}" + - "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}" + - "VolumeScheduling={{ local_volume_provisioner_enabled | string }}" + - "MountPropagation={{ local_volume_provisioner_enabled | string }}" # Vault data dirs. 
vault_base_dir: /etc/vault From d4c61d262877dee443a7239878745078e0f2681c Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Mon, 19 Feb 2018 16:43:18 +0800 Subject: [PATCH 008/177] Fixup for gce_centos7-flannel-addons --- inventory/sample/group_vars/k8s-cluster.yml | 4 ++-- roles/kubernetes/preinstall/tasks/main.yml | 10 +--------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 8ea6e27ca..af493beca 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -171,8 +171,8 @@ registry_enabled: false # Local volume provisioner deployment local_volume_provisioner_enabled: false # local_volume_provisioner_namespace: "{{ system_namespace }}" -# local_volume_provisioner_base_dir: /mnt/disks -# local_volume_provisioner_mount_dir: /local-disks +local_volume_provisioner_base_dir: /mnt/disks +local_volume_provisioner_mount_dir: /mnt/disks # local_volume_provisioner_storage_class: local-storage # CephFS provisioner deployment diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 76f9bfde4..24e839806 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -60,15 +60,7 @@ - "{{ kube_config_dir }}/ssl" - "{{ kube_manifest_dir }}" - "{{ kube_script_dir }}" - -- name: Create local volume provisioner base dir - file: - path: "{{ local_volume_provisioner_base_dir }}" - state: directory - owner: kube - when: - - inventory_hostname in groups['k8s-cluster'] - - local_volume_provisioner_enabled == true + - "{{ local_volume_provisioner_base_dir }}" - name: check cloud_provider value fail: From c20f38b89c8c67f6971e976488495375f09ee013 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Wed, 21 Feb 2018 14:41:57 +0300 Subject: [PATCH 009/177] retry unmount kubelet dirs --- roles/reset/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index f6714f680..88e9065c8 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -67,6 +67,10 @@ - name: reset | unmount kubelet dirs command: umount {{item}} with_items: '{{ mounted_dirs.stdout_lines }}' + register: umount_dir + retries: 4 + until: umount_dir.rc == 0 + delay: 5 tags: - mounts From 2eb57ee5cdf59b240c4b6884eb35556e2a2528c7 Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Wed, 21 Feb 2018 11:33:25 -0500 Subject: [PATCH 010/177] default kube_proxy_mode in kubernetes-defaults --- roles/kubespray-defaults/defaults/main.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 6883370ee..e1340f9e7 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -12,9 +12,13 @@ kube_api_anonymous_auth: false # Default value, but will be set to true automatically if detected is_atomic: false + ## Change this to use another Kubernetes version, e.g. 
a current beta release
 kube_version: v1.9.2
+## Kube proxy mode: one of ['iptables','ipvs']
+kube_proxy_mode: iptables
+
 # Set to true to allow pre-checks to fail and continue deployment
 ignore_assert_errors: false

From 2d69b05c771a73b3fa493817a6cd0313557ca9d2 Mon Sep 17 00:00:00 2001
From: Dann Bohn
Date: Wed, 21 Feb 2018 11:35:34 -0500
Subject: [PATCH 011/177] set local_release_dir in downloads to match others

---
 roles/download/defaults/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 406ec8b95..2dac5fb26 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -1,5 +1,5 @@
 ---
-local_release_dir: /tmp
+local_release_dir: /tmp/releases

 # Used to only evaluate vars from download role
 skip_downloads: false

From 87f33a4644c142e374cc28bfe5df645b2e81ca28 Mon Sep 17 00:00:00 2001
From: Matthew Mosesohn
Date: Wed, 21 Feb 2018 18:16:32 +0300
Subject: [PATCH 012/177] Use CNI to assign kube_pods_subnet for calico

Calico can now be deployed when other pools already exist, without
confusing IPAM or ending up with pods in the wrong pools.

---
 roles/etcd/defaults/main.yml | 2 +-
 roles/network_plugin/calico/defaults/main.yml | 3 ---
 roles/network_plugin/calico/tasks/main.yml | 8 --------
 .../calico/templates/cni-calico.conflist.j2 | 8 +++++---
 4 files changed, 6 insertions(+), 15 deletions(-)

diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index f394e41aa..4e122e719 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -32,7 +32,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %
 etcd_blkio_weight: 1000

-etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
+etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) | union(groups.get('vault', [])) }}"

 etcd_compaction_retention: "8"

diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index 902d01707..a44b3d315 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -16,9 +16,6 @@ etcd_cert_dir: /etc/ssl/etcd/ssl
 # Global as_num (/calico/bgp/v1/global/as_num)
 global_as_num: "64512"

-# Set to true if you need to configure multiple pools (this is not common)
-calico_ignore_extra_pools: false
-
 # You can set MTU value here. If left undefined or empty, it will
 # not be specified in calico CNI config, so Calico will use built-in
 # defaults. The value should be a number, not a string.
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index b3cacaec4..46a136768 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -138,14 +138,6 @@
     calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
   run_once: true

-- name: Calico | Check if calico pool is properly configured
-  fail:
-    msg: 'Only one network pool must be configured and it must be the subnet {{ kube_pods_subnet }}.
- Please erase calico configuration and run the playbook again ("etcdctl rm --recursive /calico/v1/ipam/v4/pool")' - when: ( calico_pools['node']['nodes'] | length > 1 and not calico_ignore_extra_pools ) or - ( not calico_pools['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") ) - run_once: true - - name: Calico | Set global as_num command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}" run_once: true diff --git a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 b/roles/network_plugin/calico/templates/cni-calico.conflist.j2 index 32f2bfff6..6dd51e912 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conflist.j2 +++ b/roles/network_plugin/calico/templates/cni-calico.conflist.j2 @@ -15,16 +15,18 @@ "etcd_ca_cert_file": "{{ etcd_cert_dir }}/ca.pem", "log_level": "info", "ipam": { - "type": "calico-ipam" + "type": "calico-ipam", + "assign_ipv4": "true", + "ipv4_pools": ["{{ kube_pods_subnet }}"] }, {% if enable_network_policy %} "policy": { "type": "k8s" }, - {% endif %} + {%- endif %} {% if calico_mtu is defined and calico_mtu is number %} "mtu": {{ calico_mtu }}, - {% endif %} + {%- endif %} "kubernetes": { "kubeconfig": "{{ kube_config_dir }}/node-kubeconfig.yaml" } From 2bd3776ddb15c01adea6c35b1da535ae3c15338c Mon Sep 17 00:00:00 2001 From: Nedim Haveric Date: Thu, 22 Feb 2018 14:00:32 +0100 Subject: [PATCH 013/177] fix apiserver manifest when disabling insecure_port --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index a6f76c6d6..2ec231f4c 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -123,7 +123,7 @@ spec: httpGet: host: 127.0.0.1 path: /healthz -{% if kube_apiserver_insecure_port == 0 %} +{% if kube_apiserver_insecure_port|int == 0 %} port: {{ kube_apiserver_port }} scheme: HTTPS {% else %} From 89fe6505f97fd38e61a87646787d3e2b59e42369 Mon Sep 17 00:00:00 2001 From: Drew Leske Date: Thu, 22 Feb 2018 22:41:19 -0800 Subject: [PATCH 014/177] Correct use of deprecated argument to TF/OpenStack module https://www.terraform.io/docs/providers/openstack/r/networking_router_v2.html#external_gateway --- contrib/terraform/openstack/modules/network/main.tf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contrib/terraform/openstack/modules/network/main.tf b/contrib/terraform/openstack/modules/network/main.tf index a5ef099ed..2c461c784 100644 --- a/contrib/terraform/openstack/modules/network/main.tf +++ b/contrib/terraform/openstack/modules/network/main.tf @@ -1,7 +1,7 @@ resource "openstack_networking_router_v2" "k8s" { - name = "${var.cluster_name}-router" - admin_state_up = "true" - external_gateway = "${var.external_net}" + name = "${var.cluster_name}-router" + admin_state_up = "true" + external_network_id = "${var.external_net}" } resource "openstack_networking_network_v2" "k8s" { From fe719c1bc1db4b0e2097a9802aa10c77baa14892 Mon Sep 17 00:00:00 2001 From: Drew Leske Date: Fri, 23 Feb 2018 12:08:45 -0800 Subject: [PATCH 015/177] Update OpenStack contrib to use per-cluster inventory layout Supports Kubespray workspace with multiple OpenStack-deployed k8s clusters. 
* Create sample inventory directory for template * Moved broken `group_vars` symlink to sample directory * Created sample cluster Terraform file * Updated documentation --- contrib/terraform/openstack/README.md | 202 ++++++++++-------- contrib/terraform/openstack/group_vars | 1 - .../openstack/inventory-sample/cluster.tf | 45 ++++ .../openstack/inventory-sample/group_vars | 1 + 4 files changed, 159 insertions(+), 90 deletions(-) delete mode 120000 contrib/terraform/openstack/group_vars create mode 100644 contrib/terraform/openstack/inventory-sample/cluster.tf create mode 120000 contrib/terraform/openstack/inventory-sample/group_vars diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 6ff0860ca..a92693b97 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -17,32 +17,33 @@ to actually install kubernetes and stand up the cluster. ### Networking The configuration includes creating a private subnet with a router to the -external net. It will allocate floating-ips from a pool and assign them to the +external net. It will allocate floating IPs from a pool and assign them to the hosts where that makes sense. You have the option of creating bastion hosts -inside the private subnet to access the nodes there. +inside the private subnet to access the nodes there. Alternatively, a node with +a floating IP can be used as a jump host to nodes without. ### Kubernetes Nodes You can create many different kubernetes topologies by setting the number of different classes of hosts. For each class there are options for allocating -floating ip addresses or not. -- Master Nodes with etcd +floating IP addresses or not. +- Master nodes with etcd - Master nodes without etcd - Standalone etcd hosts - Kubernetes worker nodes -Note that the ansible script will report an invalid configuration if you wind up +Note that the Ansible script will report an invalid configuration if you wind up with an even number of etcd instances since that is not a valid configuration. ### Gluster FS -The terraform configuration supports provisioning of an optional GlusterFS +The Terraform configuration supports provisioning of an optional GlusterFS shared file system based on a separate set of VMs. To enable this, you need to -specify -- the number of gluster hosts +specify: +- the number of Gluster hosts (minimum 2) - Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks - Other properties related to provisioning the hosts Even if you are using Container Linux by CoreOS for your cluster, you will still -need the GlusterFS VMs to be based on either Debian or RedHat based images, +need the GlusterFS VMs to be based on either Debian or RedHat based images. Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher. @@ -50,9 +51,9 @@ binaries available on hyperkube v1.4.3_coreos.0 or higher. 
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html) - [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) -- you already have a suitable OS image in glance -- you already have a floating-ip pool created -- you have security-groups enabled +- you already have a suitable OS image in Glance +- you already have a floating IP pool created +- you have security groups enabled - you have a pair of keys generated that can be used to secure the new hosts ## Module Architecture @@ -67,7 +68,7 @@ any external references to the floating IP (e.g. DNS) that would otherwise have to be updated. You can force your existing IPs by modifying the compute variables in -`kubespray.tf` as +`kubespray.tf` as follows: ``` k8s_master_fips = ["151.101.129.67"] @@ -75,14 +76,25 @@ k8s_node_fips = ["151.101.129.68"] ``` ## Terraform -Terraform will be used to provision all of the OpenStack resources. It is also -used to deploy and provision the software requirements. +Terraform will be used to provision all of the OpenStack resources with base software as appropriate. -### Prep +### Configuration -#### OpenStack +#### Inventory files -No provider variables are hard coded inside `variables.tf` because Terraform +Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state): + +```ShellSession +$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER +$ ln -s contrib/terraform/openstack/hosts inventory/$CLUSTER/ +$ cd inventory/$CLUSTER +``` + +This will be the base for subsequent Terraform commands. + +#### OpenStack access and credentials + +No provider variables are hardcoded inside `variables.tf` because Terraform supports various authentication method for OpenStack, between identity v2 and v3 API, `openrc` or `clouds.yaml`. @@ -90,15 +102,15 @@ These are examples and may vary depending on your OpenStack cloud provider, for an exhaustive list on how to authenticate on OpenStack with Terraform please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/). -##### Recommended method : clouds.yaml +##### Recommended method: clouds.yaml -Newer recommended authentication method is to use a `clouds.yaml` file that can be store in : +Newer recommended authentication method is to use a `clouds.yaml` file that can be store in: * `Current Directory` * `~/.config/openstack` * `/etc/openstack` -`clouds.yaml` : +`clouds.yaml`: ``` clouds: @@ -180,14 +192,11 @@ unset OS_PROJECT_DOMAIN_ID set OS_PROJECT_DOMAIN_NAME=Default ``` -### Terraform Variables +#### Cluster variables The construction of the cluster is driven by values found in [variables.tf](variables.tf). -The best way to set these values is to create a file in the project's root -directory called something like`my-terraform-vars.tfvars`. Many of the -variables are obvious. Here is a summary of some of the more interesting -ones: +For your cluster, edit `inventory/$CLUSTER/cluster.tf`. |Variable | Description | |---------|-------------| @@ -208,9 +217,9 @@ ones: |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. 
| | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | -### Terraform files +#### Terraform state files -In the root folder, the following files might be created (either by Terraform +In the cluster's inventory folder, the following files might be created (either by Terraform or manually), to prevent you from pushing them accidentally they are in a `.gitignore` file in the `terraform/openstack` directory : @@ -221,49 +230,56 @@ or manually), to prevent you from pushing them accidentally they are in a You can still add them manually if you want to. -## Initializing Terraform +### Initialization -Before Terraform can operate on your cluster you need to install required -plugins. This is accomplished with the command +Before Terraform can operate on your cluster you need to install the required +plugins. This is accomplished as follows: -```bash +```ShellSession +$ cd inventory/$CLUSTER $ terraform init contrib/terraform/openstack ``` -## Provisioning Cluster with Terraform -You can apply the terraform config to your cluster with the following command -issued from the project's root directory -```bash -$ terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack +This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. + +### Provisioning cluster +You can apply the Terraform configuration to your cluster with the following command +issued from your cluster's inventory directory (`inventory/$CLUSTER`): +```ShellSession +$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack ``` if you chose to create a bastion host, this script will create -`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to -be able to access your machines tunneling through the bastion's ip adress. If +`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to +be able to access your machines tunneling through the bastion's IP address. If you want to manually handle the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as ansible will pick it up automatically. +### Destroying cluster +You can destroy your new cluster with the following command issued from the cluster's inventory directory: -## Destroying Cluster with Terraform -You can destroy a config deployed to your cluster with the following command -issued from the project's root directory -```bash -$ terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack +```ShellSession +$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack ``` -## Debugging Cluster Provisioning +### Debugging You can enable debugging output from Terraform by setting -`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before runing the terraform command +`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before running the Terraform command. 
-## Terraform output +### Terraform output -Terraform can output useful values that need to be reused if you want to use Kubernetes OpenStack cloud provider with Neutron/Octavia LBaaS or Cinder persistent Volume provisioning: +Terraform can output values that are useful for configure Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment: - - `private_subnet_id`: the subnet where your instances are running, maps to `openstack_lbaas_subnet_id` - - `floating_network_id`: the network_id where the floating IP are provisioned, maps to `openstack_lbaas_floating_network_id` + - `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id` + - `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id` + +## Ansible + +### Node access + +#### SSH -# Running the Ansible Script Ensure your local ssh-agent is running and your ssh key has been added. This step is required by the terraform provisioner: @@ -272,11 +288,22 @@ $ eval $(ssh-agent -s) $ ssh-add ~/.ssh/id_rsa ``` +If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`). -Make sure you can connect to the hosts: +#### Bastion host + +If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that. ``` -$ ansible -i contrib/terraform/openstack/hosts -m ping all +ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"' +``` + +#### Test access + +Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`. + +``` +$ ansible -i inventory/$CLUSTER/hosts -m ping all example-k8s_node-1 | SUCCESS => { "changed": false, "ping": "pong" @@ -291,21 +318,17 @@ example-k8s-master-1 | SUCCESS => { } ``` -if you are deploying a system that needs bootstrapping, like Container Linux by -CoreOS, these might have a state`FAILED` due to Container Linux by CoreOS not -having python. As long as the state is not`UNREACHABLE`, this is fine. +If it fails try to connect manually via SSH. It could be something as simple as a stale host key. -if it fails try to connect manually via SSH ... it could be something as simple as a stale host key. 
+### Configure cluster variables -## Configure Cluster variables - -Edit `inventory/sample/group_vars/all.yml`: -- Set variable **bootstrap_os** according selected image +Edit `inventory/$CLUSTER/group_vars/all.yml`: +- Set variable **bootstrap_os** appropriately for your desired image: ``` # Valid bootstrap options (required): ubuntu, coreos, centos, none bootstrap_os: coreos ``` -- **bin_dir** +- **bin_dir**: ``` # Directory where the binaries will be installed # Default: @@ -313,20 +336,19 @@ bootstrap_os: coreos # For Container Linux by CoreOS: bin_dir: /opt/bin ``` -- and **cloud_provider** +- and **cloud_provider**: ``` cloud_provider: openstack ``` -Edit `inventory/sample/group_vars/k8s-cluster.yml`: -- Set variable **kube_network_plugin** according selected networking +Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`: +- Set variable **kube_network_plugin** to your desired networking plugin. + - **flannel** works out-of-the-box + - **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets ``` # Choose network plugin (calico, weave or flannel) # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing kube_network_plugin: flannel ``` -> flannel works out-of-the-box - -> calico requires allowing service's and pod's subnets on according OpenStack Neutron ports - Set variable **resolvconf_mode** ``` # Can be docker_dns, host_resolvconf or none @@ -336,18 +358,19 @@ kube_network_plugin: flannel resolvconf_mode: host_resolvconf ``` -For calico configure OpenStack Neutron ports: [OpenStack](/docs/openstack.md) - -## Deploy kubernetes: +### Deploy Kubernetes ``` -$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml +$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml ``` -## Set up local kubectl -1. Install kubectl on your workstation: -[Install and Set Up kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -2. Add route to internal IP of master node (if needed): +This will take some time as there are many tasks to run. + +## Kubernetes + +### Set up kubectl +1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation +2. Add a route to the internal IP of a master node (if needed): ``` sudo route add [master-internal-ip] gw [router-ip] ``` @@ -355,28 +378,28 @@ or ``` sudo route add -net [internal-subnet]/24 gw [router-ip] ``` -3. List Kubernetes certs&keys: +3. List Kubernetes certificates & keys: ``` ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/ ``` -4. Get admin's certs&key: +4. Get `admin`'s certificates and keys: ``` ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem ``` 5. 
Configure kubectl: -``` -kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \ +```ShellSession +$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \ --certificate-authority=ca.pem -kubectl config set-credentials default-admin \ +$ kubectl config set-credentials default-admin \ --certificate-authority=ca.pem \ --client-key=admin-key.pem \ --client-certificate=admin.pem -kubectl config set-context default-system --cluster=default-cluster --user=default-admin -kubectl config use-context default-system +$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin +$ kubectl config use-context default-system ``` 7. Check it: ``` @@ -393,14 +416,15 @@ You can tell kubectl to ignore this condition by adding the ## GlusterFS GlusterFS is not deployed by the standard`cluster.yml` playbook, see the -[glusterfs playbook documentation](../../network-storage/glusterfs/README.md) +[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md) for instructions. -Basically you will install gluster as -```bash -$ ansible-playbook --become -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml +Basically you will install Gluster as +```ShellSession +$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml ``` -# What's next -[Start Hello Kubernetes Service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/) +## What's next + +Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/). diff --git a/contrib/terraform/openstack/group_vars b/contrib/terraform/openstack/group_vars deleted file mode 120000 index d64da8dc6..000000000 --- a/contrib/terraform/openstack/group_vars +++ /dev/null @@ -1 +0,0 @@ -../../../inventory/group_vars \ No newline at end of file diff --git a/contrib/terraform/openstack/inventory-sample/cluster.tf b/contrib/terraform/openstack/inventory-sample/cluster.tf new file mode 100644 index 000000000..7830d2159 --- /dev/null +++ b/contrib/terraform/openstack/inventory-sample/cluster.tf @@ -0,0 +1,45 @@ +# your Kubernetes cluster name here +cluster_name = "i-didnt-read-the-docs" + +# SSH key to use for access to nodes +public_key_path = "~/.ssh/id_rsa.pub" + +# image to use for bastion, masters, standalone etcd instances, and nodes +image = "" +# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.) 
+ssh_user = "" + +# 0|1 bastion nodes +number_of_bastions = 0 +#flavor_bastion = "" + +# standalone etcds +number_of_etcd = 0 + +# masters +number_of_k8s_masters = 1 +number_of_k8s_masters_no_etcd = 0 +number_of_k8s_masters_no_floating_ip = 0 +number_of_k8s_masters_no_floating_ip_no_etcd = 0 +flavor_k8s_master = "" + +# nodes +number_of_k8s_nodes = 2 +number_of_k8s_nodes_no_floating_ip = 4 +#flavor_k8s_node = "" + +# GlusterFS +# either 0 or more than one +#number_of_gfs_nodes_no_floating_ip = 0 +#gfs_volume_size_in_gb = 150 +# Container Linux does not support GlusterFS +#image_gfs = "" +# May be different from other nodes +#ssh_user_gfs = "ubuntu" +#flavor_gfs_node = "" + +# networking +network_name = "" +external_net = "" +floatingip_pool = "" + diff --git a/contrib/terraform/openstack/inventory-sample/group_vars b/contrib/terraform/openstack/inventory-sample/group_vars new file mode 120000 index 000000000..373595823 --- /dev/null +++ b/contrib/terraform/openstack/inventory-sample/group_vars @@ -0,0 +1 @@ +../../../../inventory/sample/group_vars \ No newline at end of file From e2c5a3895b5d93e002e7fdfd4c0d6cb5a1934380 Mon Sep 17 00:00:00 2001 From: Drew Leske Date: Fri, 23 Feb 2018 14:39:31 -0800 Subject: [PATCH 016/177] Rename sample inventory directory to be less awkward --- .../openstack/{inventory-sample => sample-inventory}/cluster.tf | 0 .../openstack/{inventory-sample => sample-inventory}/group_vars | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename contrib/terraform/openstack/{inventory-sample => sample-inventory}/cluster.tf (100%) rename contrib/terraform/openstack/{inventory-sample => sample-inventory}/group_vars (100%) diff --git a/contrib/terraform/openstack/inventory-sample/cluster.tf b/contrib/terraform/openstack/sample-inventory/cluster.tf similarity index 100% rename from contrib/terraform/openstack/inventory-sample/cluster.tf rename to contrib/terraform/openstack/sample-inventory/cluster.tf diff --git a/contrib/terraform/openstack/inventory-sample/group_vars b/contrib/terraform/openstack/sample-inventory/group_vars similarity index 100% rename from contrib/terraform/openstack/inventory-sample/group_vars rename to contrib/terraform/openstack/sample-inventory/group_vars From 66bd57058486cdbeaf1560d937abab4ec3d1a7ce Mon Sep 17 00:00:00 2001 From: Drew Leske Date: Fri, 23 Feb 2018 15:05:29 -0800 Subject: [PATCH 017/177] Update README.md with minor fixes and cleanup --- contrib/terraform/openstack/README.md | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index a92693b97..b686fa51e 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -34,7 +34,7 @@ floating IP addresses or not. Note that the Ansible script will report an invalid configuration if you wind up with an even number of etcd instances since that is not a valid configuration. -### Gluster FS +### GlusterFS The Terraform configuration supports provisioning of an optional GlusterFS shared file system based on a separate set of VMs. To enable this, you need to specify: @@ -95,18 +95,19 @@ This will be the base for subsequent Terraform commands. #### OpenStack access and credentials No provider variables are hardcoded inside `variables.tf` because Terraform -supports various authentication method for OpenStack, between identity v2 and -v3 API, `openrc` or `clouds.yaml`. 
+supports various authentication methods for OpenStack: the older script and +environment method (using `openrc`) as well as a newer declarative method, and +different OpenStack environments may support Identity API version 2 or 3. These are examples and may vary depending on your OpenStack cloud provider, for an exhaustive list on how to authenticate on OpenStack with Terraform please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/). -##### Recommended method: clouds.yaml +##### Declarative method (recommended) -Newer recommended authentication method is to use a `clouds.yaml` file that can be store in: +The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in: -* `Current Directory` +* the current directory * `~/.config/openstack` * `/etc/openstack` @@ -128,18 +129,18 @@ clouds: ``` If you have multiple clouds defined in your `clouds.yaml` file you can choose -the one you want to use with the environment variable `OS_CLOUD` : +the one you want to use with the environment variable `OS_CLOUD`: ``` export OS_CLOUD=mycloud ``` -##### Deprecated method : openrc +##### Openrc method (deprecated) When using classic environment variables, Terraform uses default `OS_*` -environment variables : +environment variables: -With identity v2 : +With identity v2: ``` source openrc @@ -176,7 +177,7 @@ OS_USER_DOMAIN_NAME=Default ``` Terraform does not support a mix of DomainName and DomainID, choose one or the -other : +other: ``` * provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username From b6698e686a50067a06c7acacafa5f7238fdbb3bd Mon Sep 17 00:00:00 2001 From: Drew Leske Date: Fri, 23 Feb 2018 16:54:13 -0800 Subject: [PATCH 018/177] Update README.md with minor fixes/clarifications --- contrib/terraform/openstack/README.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index b686fa51e..ed11bef1e 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -86,8 +86,8 @@ Create an inventory directory for your cluster by copying the existing sample an ```ShellSession $ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER -$ ln -s contrib/terraform/openstack/hosts inventory/$CLUSTER/ $ cd inventory/$CLUSTER +$ ln -s ../../contrib/terraform/openstack/hosts ``` This will be the base for subsequent Terraform commands. @@ -138,7 +138,8 @@ export OS_CLOUD=mycloud ##### Openrc method (deprecated) When using classic environment variables, Terraform uses default `OS_*` -environment variables: +environment variables. A script suitable for your environment may be available +from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*. With identity v2: @@ -157,7 +158,7 @@ OS_INTERFACE=public OS_IDENTITY_API_VERSION=2 ``` -With identity v3 : +With identity v3: ``` source openrc @@ -238,7 +239,7 @@ plugins. This is accomplished as follows: ```ShellSession $ cd inventory/$CLUSTER -$ terraform init contrib/terraform/openstack +$ terraform init ../../contrib/terraform/openstack ``` This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. 
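From here, provisioning typically follows the same pattern as the destroy command shown below. A sketch, assuming the `cluster.tf` variable file copied from the sample inventory and issued from `inventory/$CLUSTER`:

```ShellSession
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
```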
@@ -264,6 +265,11 @@ You can destroy your new cluster with the following command issued from the clus $ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack ``` +If you've started the Ansible run, it may also be a good idea to do some manual cleanup: + +* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file +* clean up any temporary cache files: `rm /tmp/$CLUSTER-*` + ### Debugging You can enable debugging output from Terraform by setting `OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before running the Terraform command. From 8875e25fe95a7a81dca2d94645ac58bb805ea3c0 Mon Sep 17 00:00:00 2001 From: David Chang Date: Tue, 27 Feb 2018 14:34:58 +0800 Subject: [PATCH 019/177] Replace tab with space. Remove redundant spaces --- inventory/sample/group_vars/all.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 2c460e28f..e8d5395b1 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -17,7 +17,7 @@ bin_dir: /usr/local/bin ### LOADBALANCING AND ACCESS MODES ## Enable multiaccess to configure etcd clients to access all of the etcd members directly ## as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers. -## This may be the case if clients support and loadbalance multiple etcd servers natively. +## This may be the case if clients support and loadbalance multiple etcd servers natively. #etcd_multiaccess: true ### ETCD: disable peer client cert authentication. @@ -42,7 +42,7 @@ bin_dir: /usr/local/bin ## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes ## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel ## modules. -# kubelet_load_modules: false +#kubelet_load_modules: false ## Internal network total size. This is the prefix of the ## entire network. Must be unused in your environment. @@ -111,7 +111,7 @@ bin_dir: /usr/local/bin ## Default packages to install within the cluster, f.e: #kpm_packages: -# - name: kube-system/grafana +# - name: kube-system/grafana ## Certificate Management ## This setting determines whether certs are generated via scripts or whether a @@ -129,4 +129,4 @@ bin_dir: /usr/local/bin #etcd_metrics: basic # The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. -# kube_read_only_port: 10255 +#kube_read_only_port: 10255 From b7e06085c70c8bd428fb3ef8b6783791ba989fe8 Mon Sep 17 00:00:00 2001 From: RongZhang Date: Tue, 27 Feb 2018 05:31:59 -0600 Subject: [PATCH 020/177] Upgrade to Kubernetes v1.9.3 (#2323) Upgrade to Kubernetes v1.9.3 --- README.md | 2 +- inventory/sample/group_vars/k8s-cluster.yml | 2 +- inventory/sample/hosts.ini | 2 +- roles/download/defaults/main.yml | 4 ++-- roles/kubespray-defaults/defaults/main.yaml | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index df80c27ff..e5bc77a61 100644 --- a/README.md +++ b/README.md @@ -75,7 +75,7 @@ Note: Upstart/SysV init based OS types are not supported. 
Versions of supported components -------------------------------- -- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.2 +- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.3 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 - [flanneld](https://github.com/coreos/flannel/releases) v0.8.0 - [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index cf4f08a89..4a79a352f 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users" kube_api_anonymous_auth: true ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.9.2 +kube_version: v1.9.3 # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini index f8c567b34..13cc3612e 100644 --- a/inventory/sample/hosts.ini +++ b/inventory/sample/hosts.ini @@ -28,4 +28,4 @@ # [k8s-cluster:children] # kube-node -# kube-master \ No newline at end of file +# kube-master diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 406ec8b95..ad0b197fa 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -24,7 +24,7 @@ download_always_pull: False download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" # Versions -kube_version: v1.9.2 +kube_version: v1.9.3 kubeadm_version: "{{ kube_version }}" etcd_version: v3.2.4 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults @@ -50,7 +50,7 @@ vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/va # Checksums istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370 -kubeadm_checksum: 560b44a2b91747f4fb64ac8754fcf65db9a39a84c6b54d4e6483400ac6c674fc +kubeadm_checksum: 9ebbb1fbf3a9e72d7df3f0dc02500dc8f957f39489b22cf577498c8a7c6b39b1 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188 # Containers diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 6883370ee..525dd5a1b 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -13,7 +13,7 @@ kube_api_anonymous_auth: false is_atomic: false ## Change this to use another Kubernetes version, e.g. 
a current beta release
-kube_version: v1.9.2
+kube_version: v1.9.3
 
 # Set to true to allow pre-checks to fail and continue deployment
 ignore_assert_errors: false
From 128d3ef94cc3bffe21fdd3f5035064c71353fafc Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Tue, 27 Feb 2018 07:32:20 -0600
Subject: [PATCH 021/177] Fix run kubectl error (#2199)

* Fix run kubectl error

Fix the kubectl error that occurs when the first master doesn't work

* if access_ip is defined, use first_kube_master; otherwise each master uses a different IP

* Delete the set first_kube_master task and use kube_apiserver_access_address
---
 roles/kubernetes/client/tasks/main.yml | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index e20a71eb8..3b66c5e1c 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -1,15 +1,11 @@
 ---
-- name: Set first kube master
-  set_fact:
-    first_kube_master: "{{ hostvars[groups['kube-master'][0]]['access_ip'] | default(hostvars[groups['kube-master'][0]]['ip'] | default(hostvars[groups['kube-master'][0]]['ansible_default_ipv4']['address'])) }}"
-
 - name: Set external kube-apiserver endpoint
   set_fact:
     external_apiserver_endpoint: >-
       {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
       https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
       {%- else -%}
-      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      https://{{ kube_apiserver_address }}:{{ kube_apiserver_port }}
       {%- endif -%}
   tags:
     - facts
From 89ade65ad63e151cdc10aa7cbd4adfbff6298aca Mon Sep 17 00:00:00 2001
From: Brad Beam
Date: Tue, 27 Feb 2018 08:34:07 -0600
Subject: [PATCH 022/177] Fixing etcd certs for calico rr (#2374)

---
 roles/network_plugin/calico/rr/tasks/main.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml
index 5b893f38e..491065360 100644
--- a/roles/network_plugin/calico/rr/tasks/main.yml
+++ b/roles/network_plugin/calico/rr/tasks/main.yml
@@ -48,7 +48,10 @@
 
 - name: Calico-rr | Configure route reflector
   command: |-
-    {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \
+    {{ bin_dir }}/etcdctl \
+    --peers={{ etcd_access_addresses }} \
+    --cert-file {{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem \
+    --key-file {{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem \
     set /calico/bgp/v1/rr_v4/{{ rr_ip }} \
    '{
    "ip": "{{ rr_ip }}",
@@ -57,9 +60,6 @@
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
   delegate_to: "{{groups['etcd'][0]}}"
-  environment:
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
-
- meta: flush_handlers
From bb469005b275b5573137d7276b25eabc8bbe53ca Mon Sep 17 00:00:00 2001
From: Matthew Mosesohn
Date: Thu, 15 Feb 2018 20:18:12 +0300
Subject: [PATCH 023/177] Add pre-upgrade task for moving credentials file

---
 .gitignore | 1 +
 roles/kubernetes/preinstall/tasks/main.yml | 6 ++++++
 .../preinstall/tasks/pre_upgrade.yml | 21 +++++++++++++++++++
 3 files changed, 28 insertions(+)
 create mode 100644 roles/kubernetes/preinstall/tasks/pre_upgrade.yml

diff --git a/.gitignore b/.gitignore
index 66c9b4867..1e8b17188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
inventory/host_vars/ temp diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 289065c71..95a10704f 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -3,6 +3,7 @@ tags: - asserts + - name: Force binaries directory for Container Linux by CoreOS set_fact: bin_dir: "/opt/bin" @@ -39,6 +40,11 @@ tags: - facts +- import_tasks: pre_upgrade.yml + tags: + - upgrade + + - name: Create kubernetes directories file: path: "{{ item }}" diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml new file mode 100644 index 000000000..4cfb79593 --- /dev/null +++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml @@ -0,0 +1,21 @@ +--- +- name: "Pre-upgrade | check if old credential dir exists" + stat: + path: "{{ inventory_dir }}/../credentials" + register: old_credential_dir + delegate_to: localhost + +- name: "Pre-upgrade | check if new credential dir exists" + stat: + path: "{{ inventory_dir }}/credentials" + register: new_credential_dir + delegate_to: localhost + when: old_credential_dir.stat.exists + +- name: "Pre-upgrade | move data from old credential dir to new" + command: mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials + args: + creates: "{{ inventory_dir }}/credentials" + when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists + delegate_to: localhost + From bc0fc5df98f49a80f4e38b2880b6e11d9a651dde Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 27 Feb 2018 22:23:51 +0300 Subject: [PATCH 024/177] Use node cert for etcd tasks instead of delegating to first etcd (#2386) For etcdctl commands, use admin cert instead of node because this file doesn't exist on etcd only hosts. 
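For illustration, the resulting etcdctl call on the first etcd host looks roughly like the sketch below; the endpoint and the `node1` hostname are placeholders, and `/etc/ssl/etcd/ssl` is the default `etcd_cert_dir`:

```ShellSession
$ /usr/local/bin/etcdctl \
    --peers=https://10.0.0.2:2379 \
    --cert-file /etc/ssl/etcd/ssl/admin-node1.pem \
    --key-file /etc/ssl/etcd/ssl/admin-node1-key.pem \
    cluster-health
```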
--- roles/network_plugin/calico/rr/tasks/main.yml | 4 ++-- roles/network_plugin/calico/tasks/main.yml | 15 ++++++--------- roles/network_plugin/canal/tasks/main.yml | 4 ++-- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/roles/network_plugin/calico/rr/tasks/main.yml b/roles/network_plugin/calico/rr/tasks/main.yml index 491065360..02cfce152 100644 --- a/roles/network_plugin/calico/rr/tasks/main.yml +++ b/roles/network_plugin/calico/rr/tasks/main.yml @@ -50,8 +50,8 @@ command: |- {{ bin_dir }}/etcdctl \ --peers={{ etcd_access_addresses }} \ - --cert-file {{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem \ - --key-file {{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem \ + --cert-file {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}.pem \ + --key-file {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \ set /calico/bgp/v1/rr_v4/{{ rr_ip }} \ '{ "ip": "{{ rr_ip }}", diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index f3072d388..ccf54de4a 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -83,26 +83,24 @@ uri: url: https://localhost:2379/health validate_certs: no - client_cert: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem" - client_key: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem" + client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" register: result until: result.status == 200 or result.status == 401 retries: 10 delay: 5 - delegate_to: "{{groups['etcd'][0]}}" run_once: true - name: Calico | Check if calico network pool has already been configured command: |- curl \ --cacert {{ etcd_cert_dir }}/ca.pem \ - --cert {{ etcd_cert_dir}}/admin-{{ groups['etcd'][0] }}.pem \ - --key {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \ + --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \ + --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \ https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool register: calico_conf retries: 4 delay: "{{ retry_stagger | random + 3 }}" - delegate_to: "{{groups['etcd'][0]}}" run_once: true changed_when: false @@ -125,13 +123,12 @@ command: |- curl \ --cacert {{ etcd_cert_dir }}/ca.pem \ - --cert {{ etcd_cert_dir}}/admin-{{ groups['etcd'][0] }}.pem \ - --key {{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem \ + --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \ + --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \ https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool register: calico_pools_raw retries: 4 delay: "{{ retry_stagger | random + 3 }}" - delegate_to: "{{groups['etcd'][0]}}" run_once: true - set_fact: diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index d42f4ec56..a42c2cfa7 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -35,8 +35,8 @@ changed_when: false run_once: true environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ groups['etcd'][0] }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem" - name: Canal | Create canal node manifests template: From 977e7ae105585abfb9c4b0c33dd62cd9892ac24c Mon Sep 17 00:00:00 2001 From: Dmitry Vlasov Date: 
Wed, 21 Feb 2018 23:34:25 +0300
Subject: [PATCH 025/177] remove obsolete init image, bump dashboard version 1.8.1 -> 1.8.3

---
 roles/kubernetes-apps/ansible/defaults/main.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml
index dc29e33e9..350f663a1 100644
--- a/roles/kubernetes-apps/ansible/defaults/main.yml
+++ b/roles/kubernetes-apps/ansible/defaults/main.yml
@@ -41,9 +41,7 @@ netchecker_server_memory_requests: 64M
 # Dashboard
 dashboard_enabled: true
 dashboard_image_repo: gcr.io/google_containers/kubernetes-dashboard-amd64
-dashboard_image_tag: v1.8.1
-dashboard_init_image_repo: gcr.io/google_containers/kubernetes-dashboard-init-amd64
-dashboard_init_image_tag: v1.0.1
+dashboard_image_tag: v1.8.3
 
 # Limits for dashboard
 dashboard_cpu_limit: 100m
From a40d9f3c726f7be97f75ca7c7201d346fcb603c5 Mon Sep 17 00:00:00 2001
From: Andrew Greenwood
Date: Wed, 28 Feb 2018 07:36:51 -0500
Subject: [PATCH 026/177] Document a silent killer... (#2373)

Adding this into the default example inventory so it has less of a chance
of biting others after weeks of random failures (as etcd does not report
that it has run out of RAM; it just stalls). 512MB was not enough for us
to run one of our products.
---
 inventory/sample/group_vars/all.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index e8d5395b1..c107b049f 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -128,5 +128,9 @@ bin_dir: /usr/local/bin
 ## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
 #etcd_metrics: basic
 
+## Etcd is restricted by default to 512M on systems under 4GB RAM; 512MB is not enough for much more than testing.
+## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
+#etcd_memory_limit: "512M"
+
 # The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
#kube_read_only_port: 10255 From 2257dc9baa5aad4a7422941dc808ac6a065ea3ca Mon Sep 17 00:00:00 2001 From: Miouge1 Date: Wed, 28 Feb 2018 16:29:38 +0100 Subject: [PATCH 027/177] Install latest version of Helm --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index ad0b197fa..f18dad0e8 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -122,7 +122,7 @@ kibana_version: "v4.6.1" kibana_image_repo: "gcr.io/google_containers/kibana" kibana_image_tag: "{{ kibana_version }}" -helm_version: "v2.7.2" +helm_version: "v2.8.1" helm_image_repo: "lachlanevenson/k8s-helm" helm_image_tag: "{{ helm_version }}" tiller_image_repo: "gcr.io/kubernetes-helm/tiller" From 6b80ac65006f378cebbcaeca0e72df01c475c44d Mon Sep 17 00:00:00 2001 From: Simon Li Date: Fri, 9 Feb 2018 22:41:31 +0000 Subject: [PATCH 028/177] Fix indexing of supplementary DNS in openssl.conf --- roles/kubernetes/secrets/templates/openssl.conf.j2 | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index a214739c9..adc875ba6 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -15,9 +15,10 @@ DNS.5 = localhost {% for host in groups['kube-master'] %} DNS.{{ 5 + loop.index }} = {{ host }} {% endfor %} +{% set idns = groups['kube-master'] | length | int + 5 %} {% if loadbalancer_apiserver is defined %} -{% set idx = groups['kube-master'] | length | int + 5 + 1 %} -DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }} +{% set idns = idns + 1 %} +DNS.{{ idns | string }} = {{ apiserver_loadbalancer_domain_name }} {% endif %} {% for host in groups['kube-master'] %} IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} @@ -36,7 +37,7 @@ IP.{{ idx + 1 }} = 127.0.0.1 {% if addr | ipaddr %} IP.{{ is + loop.index }} = {{ addr }} {% else %} -DNS.{{ is + loop.index }} = {{ addr }} +DNS.{{ idns + loop.index }} = {{ addr }} {% endif %} {% endfor %} {% endif %} From ad89d1c87698ef24b0a0f7c61b1cb2ef8979c92c Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Thu, 22 Feb 2018 19:32:19 -0600 Subject: [PATCH 029/177] Update pre_upgrade.yml --- roles/kubernetes/preinstall/tasks/main.yml | 9 ++++----- roles/kubernetes/preinstall/tasks/pre_upgrade.yml | 15 ++++++++------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 95a10704f..34ad9af34 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -3,6 +3,10 @@ tags: - asserts +# This is run before bin_dir is pinned because these tasks are run on localhost +- import_tasks: pre_upgrade.yml + tags: + - upgrade - name: Force binaries directory for Container Linux by CoreOS set_fact: @@ -40,11 +44,6 @@ tags: - facts -- import_tasks: pre_upgrade.yml - tags: - - upgrade - - - name: Create kubernetes directories file: path: "{{ item }}" diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml index 4cfb79593..91bab1f52 100644 --- a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml +++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml @@ -1,21 +1,22 @@ --- - name: "Pre-upgrade | check if 
old credential dir exists" - stat: + local_action: + module: stat path: "{{ inventory_dir }}/../credentials" register: old_credential_dir - delegate_to: localhost + become: no - name: "Pre-upgrade | check if new credential dir exists" - stat: + local_action: + module: stat path: "{{ inventory_dir }}/credentials" register: new_credential_dir - delegate_to: localhost + become: no when: old_credential_dir.stat.exists - name: "Pre-upgrade | move data from old credential dir to new" - command: mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials + local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials args: creates: "{{ inventory_dir }}/credentials" + become: no when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists - delegate_to: localhost - From 7ef9f4dfdd7d64876aacc48a982313dbea8a06f5 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Wed, 28 Feb 2018 22:41:52 +0300 Subject: [PATCH 030/177] Revert "Add pre-upgrade task for moving credentials file" (#2393) --- .gitignore | 1 - roles/kubernetes/preinstall/tasks/main.yml | 5 ----- .../preinstall/tasks/pre_upgrade.yml | 22 ------------------- 3 files changed, 28 deletions(-) delete mode 100644 roles/kubernetes/preinstall/tasks/pre_upgrade.yml diff --git a/.gitignore b/.gitignore index 1e8b17188..66c9b4867 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ .vagrant *.retry inventory/vagrant_ansible_inventory -inventory/credentials/ inventory/group_vars/fake_hosts.yml inventory/host_vars/ temp diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 34ad9af34..289065c71 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -3,11 +3,6 @@ tags: - asserts -# This is run before bin_dir is pinned because these tasks are run on localhost -- import_tasks: pre_upgrade.yml - tags: - - upgrade - - name: Force binaries directory for Container Linux by CoreOS set_fact: bin_dir: "/opt/bin" diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml deleted file mode 100644 index 91bab1f52..000000000 --- a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: "Pre-upgrade | check if old credential dir exists" - local_action: - module: stat - path: "{{ inventory_dir }}/../credentials" - register: old_credential_dir - become: no - -- name: "Pre-upgrade | check if new credential dir exists" - local_action: - module: stat - path: "{{ inventory_dir }}/credentials" - register: new_credential_dir - become: no - when: old_credential_dir.stat.exists - -- name: "Pre-upgrade | move data from old credential dir to new" - local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials - args: - creates: "{{ inventory_dir }}/credentials" - become: no - when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists From 67ffd8e923c74bb9fa4637f2dfdaa587e5893bcd Mon Sep 17 00:00:00 2001 From: RongZhang Date: Thu, 1 Mar 2018 02:39:14 -0600 Subject: [PATCH 031/177] Add etcd-events cluster for kube-apiserver (#2385) Add etcd-events cluster for kube-apiserver --- roles/etcd/defaults/main.yml | 1 + roles/etcd/handlers/main.yml | 27 +++++++++++ roles/etcd/tasks/configure.yml | 44 +++++++++++++++-- roles/etcd/tasks/install_docker.yml | 10 ++++ roles/etcd/tasks/join_etcd-events_member.yml | 47 +++++++++++++++++++ roles/etcd/tasks/join_etcd_member.yml | 47 
+++++++++++++++++++ roles/etcd/tasks/main.yml | 12 +++++ roles/etcd/tasks/refresh_config.yml | 7 +++ roles/etcd/tasks/set_cluster_health.yml | 15 +++++- .../templates/etcd-events-docker.service.j2 | 18 +++++++ .../templates/etcd-events-host.service.j2 | 16 +++++++ roles/etcd/templates/etcd-events.env.j2 | 26 ++++++++++ roles/etcd/templates/etcd-events.j2 | 22 +++++++++ .../manifests/kube-apiserver.manifest.j2 | 3 ++ roles/kubespray-defaults/defaults/main.yaml | 13 +++++ roles/reset/tasks/main.yml | 4 ++ tests/files/gce_centos7-flannel-addons.yml | 1 + 17 files changed, 309 insertions(+), 4 deletions(-) create mode 100644 roles/etcd/tasks/join_etcd-events_member.yml create mode 100644 roles/etcd/tasks/join_etcd_member.yml create mode 100644 roles/etcd/templates/etcd-events-docker.service.j2 create mode 100644 roles/etcd/templates/etcd-events-host.service.j2 create mode 100644 roles/etcd/templates/etcd-events.env.j2 create mode 100644 roles/etcd/templates/etcd-events.j2 diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 4e122e719..4986ad257 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -4,6 +4,7 @@ etcd_cluster_setup: true etcd_backup_prefix: "/var/backups" etcd_data_dir: "/var/lib/etcd" +etcd_events_data_dir: "/var/lib/etcd-events" etcd_config_dir: /etc/ssl/etcd etcd_cert_dir: "{{ etcd_config_dir }}/ssl" diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index f6666ee94..a72cbd515 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -7,17 +7,33 @@ - reload etcd - wait for etcd up +- name: restart etcd-events + command: /bin/true + notify: + - etcd-events | reload systemd + - reload etcd-events + - wait for etcd-events up + - import_tasks: backup.yml - name: etcd | reload systemd command: systemctl daemon-reload +- name: etcd-events | reload systemd + command: systemctl daemon-reload + - name: reload etcd service: name: etcd state: restarted when: is_etcd_master +- name: reload etcd-events + service: + name: etcd-events + state: restarted + when: is_etcd_master + - name: wait for etcd up uri: url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" @@ -29,6 +45,17 @@ retries: 10 delay: 5 +- name: wait for etcd-events up + uri: + url: "https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2381/health" + validate_certs: no + client_cert: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem" + client_key: "{{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem" + register: result + until: result.status is defined and result.status == 200 + retries: 10 + delay: 5 + - name: set etcd_secret_changed set_fact: etcd_secret_changed: true diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index 7af17f69e..d7d3920c6 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -1,5 +1,5 @@ --- -- name: Configure | Check if member is in cluster +- name: Configure | Check if member is in etcd cluster shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}" register: etcd_member_in_cluster ignore_errors: true @@ -12,6 +12,19 @@ ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" +- name: Configure | Check if member is in etcd-events cluster + shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ 
etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_events_member_in_cluster
+  ignore_errors: true
+  changed_when: false
+  check_mode: no
+  when: is_etcd_master and etcd_events_cluster_setup
+  tags:
+    - facts
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+
 - name: Configure | Copy etcd.service systemd file
   template:
     src: "etcd-{{ etcd_deployment_type }}.service.j2"
@@ -20,11 +33,36 @@
   when: is_etcd_master
   notify: restart etcd
 
-- name: Configure | Join member(s) to cluster one at a time
-  include_tasks: join_member.yml
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-host.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "host" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Copy etcd-events.service systemd file
+  template:
+    src: "etcd-events-docker.service.j2"
+    dest: /etc/systemd/system/etcd-events.service
+    backup: yes
+  when: is_etcd_master and etcd_deployment_type == "docker" and etcd_events_cluster_setup
+  notify: restart etcd-events
+
+- name: Configure | Join member(s) to etcd cluster one at a time
+  include_tasks: join_etcd_member.yml
   vars:
     target_node: "{{ item }}"
   loop_control:
     pause: 10
   with_items: "{{ groups['etcd'] }}"
   when: inventory_hostname == item and etcd_member_in_cluster.rc != 0 and etcd_cluster_is_healthy.rc == 0
+
+- name: Configure | Join member(s) to etcd-events cluster one at a time
+  include_tasks: join_etcd-events_member.yml
+  vars:
+    target_node: "{{ item }}"
+  loop_control:
+    pause: 10
+  with_items: "{{ groups['etcd'] }}"
+  when: inventory_hostname == item and etcd_events_cluster_setup and etcd_events_member_in_cluster.rc != 0 and etcd_events_cluster_is_healthy.rc == 0
diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml
index 291bb5f25..58e1485a5 100644
--- a/roles/etcd/tasks/install_docker.yml
+++ b/roles/etcd/tasks/install_docker.yml
@@ -18,3 +18,13 @@
     mode: 0755
     backup: yes
   notify: restart etcd
+
+- name: Install etcd-events launch script
+  template:
+    src: etcd-events.j2
+    dest: "{{ bin_dir }}/etcd-events"
+    owner: 'root'
+    mode: 0755
+    backup: yes
+  when: etcd_events_cluster_setup
+  notify: restart etcd-events
diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml
new file mode 100644
index 000000000..104ef22df
--- /dev/null
+++ b/roles/etcd/tasks/join_etcd-events_member.yml
@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} member add {{ etcd_member_name }} {{ etcd_events_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_events_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_events_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2382,
{%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}={{ etcd_events_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd-events is running
+  service:
+    name: etcd-events
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in etcd-events cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_events_access_address }}"
+  register: etcd_events_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml
new file mode 100644
index 000000000..b7801f0c9
--- /dev/null
+++ b/roles/etcd/tasks/join_etcd_member.yml
@@ -0,0 +1,47 @@
+---
+- name: Join Member | Add member to cluster
+  shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member add {{ etcd_member_name }} {{ etcd_peer_url }}"
+  register: member_add_result
+  until: member_add_result.rc == 0
+  retries: 4
+  delay: "{{ retry_stagger | random + 3 }}"
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+
+- include_tasks: refresh_config.yml
+  vars:
+    etcd_peer_addresses: >-
+      {% for host in groups['etcd'] -%}
+        {%- if hostvars[host]['etcd_member_in_cluster'].rc == 0 -%}
+          {{ "etcd"+loop.index|string }}=https://{{ hostvars[host].access_ip | default(hostvars[host].ip | default(hostvars[host].ansible_default_ipv4['address'])) }}:2380,
+        {%- endif -%}
+        {%- if loop.last -%}
+          {{ etcd_member_name }}={{ etcd_peer_url }}
+        {%- endif -%}
+      {%- endfor -%}
+  when: target_node == inventory_hostname
+
+- name: Join Member | reload systemd
+  command: systemctl daemon-reload
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure etcd is running
+  service:
+    name: etcd
+    state: started
+    enabled: yes
+  when: target_node == inventory_hostname
+
+- name: Join Member | Ensure member is in cluster
+  shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_access_addresses }} member list | grep -q {{ etcd_access_address }}"
+  register: etcd_member_in_cluster
+  changed_when: false
+  check_mode: no
+  tags:
+    - facts
+  when: target_node == inventory_hostname
+  environment:
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index a8a9f23ad..bb299126b 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -43,6 +43,11 @@
   notify: restart etcd
   when: is_etcd_master and etcd_secret_changed|default(false)
 
+- name: Restart etcd-events if certs changed
+  command: /bin/true
+  notify: restart etcd-events
+  when: is_etcd_master and etcd_events_cluster_setup and etcd_secret_changed|default(false)
+
 # reload-systemd
 - meta: flush_handlers
 
@@ -53,6 +58,13 @@
     enabled: yes
   when: is_etcd_master and etcd_cluster_setup
 
+- name: Ensure etcd-events is running
+  service:
+ name: etcd-events + state: started + enabled: yes + when: is_etcd_master and etcd_events_cluster_setup + # After etcd cluster is assembled, make sure that # initial state of the cluster is in `existing` # state insted of `new`. diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml index 0691d1df9..927663301 100644 --- a/roles/etcd/tasks/refresh_config.yml +++ b/roles/etcd/tasks/refresh_config.yml @@ -5,3 +5,10 @@ dest: /etc/etcd.env notify: restart etcd when: is_etcd_master + +- name: Refresh config | Create etcd-events config file + template: + src: etcd-events.env.j2 + dest: /etc/etcd-events.env + notify: restart etcd-events + when: is_etcd_master and etcd_events_cluster_setup diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml index 955208633..68e738031 100644 --- a/roles/etcd/tasks/set_cluster_health.yml +++ b/roles/etcd/tasks/set_cluster_health.yml @@ -1,5 +1,5 @@ --- -- name: Configure | Check if cluster is healthy +- name: Configure | Check if etcd cluster is healthy shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} cluster-health | grep -q 'cluster is healthy'" register: etcd_cluster_is_healthy ignore_errors: true @@ -11,3 +11,16 @@ environment: ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + +- name: Configure | Check if etcd-events cluster is healthy + shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'" + register: etcd_events_cluster_is_healthy + ignore_errors: true + changed_when: false + check_mode: no + when: is_etcd_master and etcd_events_cluster_setup + tags: + - facts + environment: + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/templates/etcd-events-docker.service.j2 b/roles/etcd/templates/etcd-events-docker.service.j2 new file mode 100644 index 000000000..271980ab7 --- /dev/null +++ b/roles/etcd/templates/etcd-events-docker.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=etcd docker wrapper +Wants=docker.socket +After=docker.service + +[Service] +User=root +PermissionsStartOnly=true +EnvironmentFile=-/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd-events +ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ etcd_member_name }}-events +ExecStop={{ docker_bin_dir }}/docker stop {{ etcd_member_name }}-events +Restart=always +RestartSec=15s +TimeoutStartSec=30s + +[Install] +WantedBy=multi-user.target diff --git a/roles/etcd/templates/etcd-events-host.service.j2 b/roles/etcd/templates/etcd-events-host.service.j2 new file mode 100644 index 000000000..6e0167a8c --- /dev/null +++ b/roles/etcd/templates/etcd-events-host.service.j2 @@ -0,0 +1,16 @@ +[Unit] +Description=etcd +After=network.target + +[Service] +Type=notify +User=root +EnvironmentFile=/etc/etcd-events.env +ExecStart={{ bin_dir }}/etcd +NotifyAccess=all +Restart=always +RestartSec=10s +LimitNOFILE=40000 + +[Install] +WantedBy=multi-user.target diff --git a/roles/etcd/templates/etcd-events.env.j2 b/roles/etcd/templates/etcd-events.env.j2 new file mode 100644 index 000000000..c168ab03e --- /dev/null +++ b/roles/etcd/templates/etcd-events.env.j2 @@ -0,0 +1,26 @@ +ETCD_DATA_DIR={{ etcd_events_data_dir }} +ETCD_ADVERTISE_CLIENT_URLS={{ etcd_events_client_url }} +ETCD_INITIAL_ADVERTISE_PEER_URLS={{ 
etcd_events_peer_url }} +ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %} + +ETCD_METRICS={{ etcd_metrics }} +ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2381,https://127.0.0.1:2381 +ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }} +ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }} +ETCD_INITIAL_CLUSTER_TOKEN=k8s_events_etcd +ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2382 +ETCD_NAME={{ etcd_member_name }}-events +ETCD_PROXY=off +ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }} +ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} + +# TLS settings +ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_CLIENT_CERT_AUTH={{ etcd_secure_client | lower}} + +ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem +ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}.pem +ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-{{ inventory_hostname }}-key.pem +ETCD_PEER_CLIENT_CERT_AUTH={{ etcd_peer_client_auth }} diff --git a/roles/etcd/templates/etcd-events.j2 b/roles/etcd/templates/etcd-events.j2 new file mode 100644 index 000000000..3b25eaaff --- /dev/null +++ b/roles/etcd/templates/etcd-events.j2 @@ -0,0 +1,22 @@ +#!/bin/bash +{{ docker_bin_dir }}/docker run \ + --restart=on-failure:5 \ + --env-file=/etc/etcd-events.env \ + --net=host \ + -v /etc/ssl/certs:/etc/ssl/certs:ro \ + -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ + -v {{ etcd_events_data_dir }}:{{ etcd_events_data_dir }}:rw \ + {% if etcd_memory_limit is defined %} + --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ + {% endif %} + --oom-kill-disable \ + {% if etcd_cpu_limit is defined %} + --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ + {% endif %} + {% if etcd_blkio_weight is defined %} + --blkio-weight={{ etcd_blkio_weight }} \ + {% endif %} + --name={{ etcd_member_name }}-events \ + {{ etcd_image_repo }}:{{ etcd_image_tag }} \ + /usr/local/bin/etcd \ + "$@" diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 2ec231f4c..bee13b4ec 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -30,6 +30,9 @@ spec: - apiserver - --advertise-address={{ ip | default(ansible_default_ipv4.address) }} - --etcd-servers={{ etcd_access_addresses }} +{% if etcd_events_cluster_setup %} + - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses }} +{% endif %} {% if kube_version | version_compare('v1.9', '<') %} - --etcd-quorum-read=true {% endif %} diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 0a4429e05..7c0a0f12c 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -289,16 +289,25 @@ kube_apiserver_client_key: |- {{ kube_cert_dir }}/apiserver-key.pem {%- endif %} +# Set to true to deploy etcd-events cluster +etcd_events_cluster_setup: false + # Vars for pointing to etcd endpoints is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}" etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}" etcd_access_address: "{{ access_ip | default(etcd_address) }}" etcd_peer_url: "https://{{ etcd_access_address }}:2380" etcd_client_url: 
"https://{{ etcd_access_address }}:2379" +etcd_events_peer_url: "https://{{ etcd_access_address }}:2382" +etcd_events_client_url: "https://{{ etcd_access_address }}:2381" etcd_access_addresses: |- {% for item in groups['etcd'] -%} https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2379{% if not loop.last %},{% endif %} {%- endfor %} +etcd_events_access_addresses: |- + {% for item in groups['etcd'] -%} + https://{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}:2381{% if not loop.last %},{% endif %} + {%- endfor %} etcd_member_name: |- {% for host in groups['etcd'] %} {% if inventory_hostname == host %}{{"etcd"+loop.index|string }}{% endif %} @@ -307,3 +316,7 @@ etcd_peer_addresses: |- {% for item in groups['etcd'] -%} {{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %} {%- endfor %} +etcd_events_peer_addresses: |- + {% for item in groups['etcd'] -%} + {{ "etcd"+loop.index|string }}-events=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2382{% if not loop.last %},{% endif %} + {%- endfor %} diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 88e9065c8..9b87a1a13 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -8,6 +8,7 @@ - kubelet - vault - etcd + - etcd-events failed_when: false tags: - services @@ -19,6 +20,7 @@ with_items: - kubelet - etcd + - etcd-events - vault - calico-node register: services_removed @@ -95,6 +97,7 @@ - /root/.kube - /root/.helm - "{{ etcd_data_dir }}" + - /var/lib/etcd-events - /etc/ssl/etcd - /var/log/calico - /etc/cni @@ -125,6 +128,7 @@ - "{{ bin_dir }}/kubelet" - "{{ bin_dir }}/etcd-scripts" - "{{ bin_dir }}/etcd" + - "{{ bin_dir }}/etcd-events" - "{{ bin_dir }}/etcdctl" - "{{ bin_dir }}/kubernetes-scripts" - "{{ bin_dir }}/kubectl" diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index 272c5e7ae..0e4346f67 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -9,6 +9,7 @@ kube_network_plugin: flannel helm_enabled: true istio_enabled: true efk_enabled: true +etcd_events_cluster_setup: true local_volume_provisioner_enabled: true etcd_deployment_type: host deploy_netchecker: true From cdc2e7d4fe6f9c823c6692838dd79482b1bcf0ec Mon Sep 17 00:00:00 2001 From: brant Date: Fri, 2 Mar 2018 11:30:55 +0800 Subject: [PATCH 032/177] Test-for-release-(do-not-merge) --- Test-for-release-(do-not-merge) | 1 + 1 file changed, 1 insertion(+) create mode 100644 Test-for-release-(do-not-merge) diff --git a/Test-for-release-(do-not-merge) b/Test-for-release-(do-not-merge) new file mode 100644 index 000000000..c96bff2e5 --- /dev/null +++ b/Test-for-release-(do-not-merge) @@ -0,0 +1 @@ +Test-for-release-(do-not-merge) \ No newline at end of file From 40d72d18659228486a8ef7dfdb539f0ca552ff51 Mon Sep 17 00:00:00 2001 From: brant Date: Fri, 2 Mar 2018 13:48:11 +0800 Subject: [PATCH 033/177] added [calico-rr] --- Test-for-release-(do-not-merge) | 1 - contrib/terraform/aws/templates/inventory.tpl | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) delete mode 100644 Test-for-release-(do-not-merge) diff --git a/Test-for-release-(do-not-merge) 
b/Test-for-release-(do-not-merge) deleted file mode 100644 index c96bff2e5..000000000 --- a/Test-for-release-(do-not-merge) +++ /dev/null @@ -1 +0,0 @@ -Test-for-release-(do-not-merge) \ No newline at end of file diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl index 2bb772549..5ef875543 100644 --- a/contrib/terraform/aws/templates/inventory.tpl +++ b/contrib/terraform/aws/templates/inventory.tpl @@ -25,3 +25,6 @@ kube-master [k8s-cluster:vars] ${elb_api_fqdn} + + +[calico-rr] \ No newline at end of file From 7c7b33a0f8a202af25485a83e0877a683663d32b Mon Sep 17 00:00:00 2001 From: brant Date: Fri, 2 Mar 2018 14:31:54 +0800 Subject: [PATCH 034/177] remove + groups['calico-rr'] --- cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster.yml b/cluster.yml index 09f6043ad..1305ab450 100644 --- a/cluster.yml +++ b/cluster.yml @@ -26,7 +26,7 @@ setup: delegate_to: "{{item}}" delegate_facts: True - with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}" + with_items: "{{ groups['k8s-cluster'] + groups['etcd'] }}" - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" From 2a3b48edafa21b620100000d046c6abf1b3052ab Mon Sep 17 00:00:00 2001 From: "rong.zhang" Date: Fri, 2 Mar 2018 14:39:13 +0800 Subject: [PATCH 035/177] Delete unused fedora docker repo --- roles/docker/vars/fedora-20.yml | 17 ----------------- roles/docker/vars/fedora.yml | 28 ---------------------------- 2 files changed, 45 deletions(-) delete mode 100644 roles/docker/vars/fedora-20.yml delete mode 100644 roles/docker/vars/fedora.yml diff --git a/roles/docker/vars/fedora-20.yml b/roles/docker/vars/fedora-20.yml deleted file mode 100644 index 31d431ee8..000000000 --- a/roles/docker/vars/fedora-20.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -docker_kernel_min_version: '0' - -# versioning: docker-io itself is pinned at docker 1.5 - -docker_package_info: - pkg_mgr: yum - pkgs: - - name: docker-io - -docker_repo_key_info: - pkg_key: '' - repo_keys: [] - -docker_repo_info: - pkg_repo: '' - repos: [] diff --git a/roles/docker/vars/fedora.yml b/roles/docker/vars/fedora.yml deleted file mode 100644 index 8ce0588d5..000000000 --- a/roles/docker/vars/fedora.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -docker_kernel_min_version: '0' - -# https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package -# https://download.docker.com/linux/fedora/7/x86_64/stable/ -# the package names below are guesses; -# docs mention `sudo dnf config-manager --enable docker-ce-edge` for edge -docker_versioned_pkg: - 'latest': docker - '1.11': docker-1:1.11.2 - '1.12': docker-1:1.12.6 - '1.13': docker-1.13.1 - '17.03': docker-17.03.1 - 'stable': docker-ce - 'edge': docker-ce-edge - -docker_package_info: - pkg_mgr: dnf - pkgs: - - name: "{{ docker_versioned_pkg[docker_version | string] }}" - -docker_repo_key_info: - pkg_key: '' - repo_keys: [] - -docker_repo_info: - pkg_repo: '' - repos: [] From 739f6c78adbb0c58909b9a1aeb18feb1f40d344b Mon Sep 17 00:00:00 2001 From: brant Date: Fri, 2 Mar 2018 15:41:04 +0800 Subject: [PATCH 036/177] remove [calico-rr] --- contrib/terraform/aws/templates/inventory.tpl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl index 5ef875543..20a8a69a6 100644 --- a/contrib/terraform/aws/templates/inventory.tpl +++ b/contrib/terraform/aws/templates/inventory.tpl @@ -24,7 
+24,4 @@ kube-master [k8s-cluster:vars] -${elb_api_fqdn} - - -[calico-rr] \ No newline at end of file +${elb_api_fqdn} \ No newline at end of file From 9837b7926fb616080073f5ea8fc955a9177c0033 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 2 Mar 2018 15:36:52 +0300 Subject: [PATCH 037/177] Use proper lookup of etcd host for calico (#2408) Fixes #2397 --- roles/network_plugin/calico/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 44ea00e83..05e7b9611 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -81,7 +81,7 @@ - name: Calico | wait for etcd uri: - url: https://localhost:2379/health + url: "{{ etcd_access_addresses.split(',') | first }}/health" validate_certs: no client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" @@ -97,7 +97,7 @@ --cacert {{ etcd_cert_dir }}/ca.pem \ --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \ --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \ - https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool + {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool register: calico_conf retries: 4 delay: "{{ retry_stagger | random + 3 }}" @@ -125,7 +125,7 @@ --cacert {{ etcd_cert_dir }}/ca.pem \ --cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \ --key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \ - https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool + {{ etcd_access_addresses.split(',') | first }}/v2/keys/calico/v1/ipam/v4/pool register: calico_pools_raw retries: 4 delay: "{{ retry_stagger | random + 3 }}" From fd464421887eaf98a17e3c133ed5efb5cea7c3d3 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 13 Feb 2018 09:08:44 +0800 Subject: [PATCH 038/177] Integrate kubernetes/ingress-nginx 0.11.0 to Kubespray --- cluster.yml | 1 + inventory/sample/group_vars/k8s-cluster.yml | 6 + .../ingress_nginx/README.md | 283 ++++++++++++++++++ .../ingress_nginx/defaults/main.yml | 10 + .../ingress_nginx/tasks/main.yml | 42 +++ .../ingress-nginx-clusterrole.yml.j2 | 25 ++ .../ingress-nginx-clusterrolebinding.yml.j2 | 14 + .../templates/ingress-nginx-cm.yml.j2 | 8 + .../ingress-nginx-controller-ds.yml.j2 | 70 +++++ .../ingress-nginx-default-backend-rs.yml.j2 | 37 +++ .../ingress-nginx-default-backend-svc.yml.j2 | 14 + .../templates/ingress-nginx-ns.yml.j2 | 5 + .../templates/ingress-nginx-role.yml.j2 | 24 ++ .../ingress-nginx-rolebinding.yml.j2 | 14 + .../templates/ingress-nginx-sa.yml.j2 | 6 + .../ingress-nginx-tcp-servicecs-cm.yml.j2 | 8 + .../ingress-nginx-udp-servicecs-cm.yml.j2 | 8 + .../ingress_controller/meta/main.yml | 8 + roles/kubespray-defaults/defaults/main.yaml | 1 + 19 files changed, 584 insertions(+) create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 
create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/meta/main.yml diff --git a/cluster.yml b/cluster.yml index 1305ab450..00c68a593 100644 --- a/cluster.yml +++ b/cluster.yml @@ -100,6 +100,7 @@ - { role: kubespray-defaults} - { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/policy_controller, tags: policy-controller } + - { role: kubernetes-apps/ingress_controller, tags: ingress-controller } - { role: kubernetes-apps/external_provisioner, tags: external-provisioner } - hosts: calico-rr diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index bbada34cf..c5047acd1 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -187,6 +187,12 @@ cephfs_provisioner_enabled: false # cephfs_provisioner_secret: secret # cephfs_provisioner_storage_class: cephfs +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 + # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now ) persistent_volumes_enabled: false diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md new file mode 100644 index 000000000..0fb40f31e --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/README.md @@ -0,0 +1,283 @@ +Installation Guide +================== + +Contents +-------- + +- [Mandatory commands](#mandatory-commands) +- [Install without RBAC roles](#install-without-rbac-roles) +- [Install with RBAC roles](#install-with-rbac-roles) +- [Custom Provider](#custom-provider) +- [minikube](#minikube) +- [AWS](#aws) +- [GCE - GKE](#gce---gke) +- [Azure](#azure) +- [Baremetal](#baremetal) +- [Using Helm](#using-helm) +- [Verify installation](#verify-installation) +- [Detect installed version](#detect-installed-version) +- [Deploying the config-map](#deploying-the-config-map) + +Generic Deployment +------------------ + +The following resources are required for a generic deployment. 
+
+### Mandatory commands
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/namespace.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/default-backend.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/tcp-services-configmap.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/udp-services-configmap.yaml \
+    | kubectl apply -f -
+```
+
+### Install without RBAC roles
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/without-rbac.yaml \
+    | kubectl apply -f -
+```
+
+### Install with RBAC roles
+
+Please check the [RBAC](rbac.md) document.
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/rbac.yaml \
+    | kubectl apply -f -
+
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/with-rbac.yaml \
+    | kubectl apply -f -
+```
+
+Custom Service Provider Deployment
+----------------------------------
+
+There are cloud provider specific yaml files.
+
+### minikube
+
+For standard usage:
+
+``` console
+minikube addons enable ingress
+```
+
+For development:
+
+1. Disable the ingress addon:
+
+    ``` console
+    $ minikube addons disable ingress
+    ```
+
+2. Use the [docker daemon](https://github.com/kubernetes/minikube/blob/master/docs/reusing_the_docker_daemon.md)
+3. [Build the image](../docs/development.md)
+4. Perform [Mandatory commands](#mandatory-commands)
+5. Install the `nginx-ingress-controller` deployment [without RBAC roles](#install-without-rbac-roles) or [with RBAC roles](#install-with-rbac-roles)
+6. Edit the `nginx-ingress-controller` deployment to use your custom image. Local images can be seen by performing `docker images`.
+
+    ``` console
+    $ kubectl edit deployment nginx-ingress-controller -n ingress-nginx
+    ```
+
+    edit the following section:
+
+    ``` yaml
+    image: <image-name>:<tag>
+    imagePullPolicy: IfNotPresent
+    name: nginx-ingress-controller
+    ```
+
+7. Confirm the `nginx-ingress-controller` deployment exists:
+
+``` console
+$ kubectl get pods -n ingress-nginx
+NAME                                       READY     STATUS    RESTARTS   AGE
+default-http-backend-66b447d9cf-rrlf9      1/1       Running   0          12s
+nginx-ingress-controller-fdcdcd6dd-vvpgs   1/1       Running   0          11s
+```
+
+### AWS
+
+In AWS we use an Elastic Load Balancer (ELB) to expose the NGINX Ingress controller behind a Service of `Type=LoadBalancer`.
+This setup requires choosing in which layer (L4 or L7) we want to configure the ELB:
+
+- [Layer 4](https://en.wikipedia.org/wiki/OSI_model#Layer_4:_Transport_Layer): use TCP as the listener protocol for ports 80 and 443.
+- [Layer 7](https://en.wikipedia.org/wiki/OSI_model#Layer_7:_Application_Layer): use HTTP as the listener protocol for port 80 and terminate TLS in the ELB
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+For L4:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/service-l4.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l4.yaml
+```
+
+For L7:
+
+Edit the file `provider/aws/service-l7.yaml`, replacing the dummy certificate id with a valid one of the form `"arn:aws:acm:us-west-2:XXXXXXXX:certificate/XXXXXX-XXXXXXX-XXXXXXX-XXXXXXXX"`.
+Then execute:
+
+``` console
+kubectl apply -f provider/aws/service-l7.yaml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/aws/patch-configmap-l7.yaml
+```
+
+This example creates an ELB with just two listeners, one on port 80 and another on port 443.
+
+![Listeners](../docs/images/elb-l7-listener.png)
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+### GCE - GKE
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/gce-gke/service.yaml \
+    | kubectl apply -f -
+```
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+**Important Note:** proxy protocol is not supported in GCE/GKE
+
+### Azure
+
+Patch the nginx ingress controller deployment to add the flag `--publish-service`
+
+``` console
+kubectl patch deployment -n ingress-nginx nginx-ingress-controller --type='json' \
+  --patch="$(curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/publish-service-patch.yaml)"
+```
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/azure/service.yaml \
+    | kubectl apply -f -
+```
+
+If the ingress controller uses RBAC, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-with-rbac.yaml
+```
+
+If not, run:
+
+``` console
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/patch-service-without-rbac.yaml
+```
+
+### Baremetal
+
+Using 
[NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport):
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/provider/baremetal/service-nodeport.yaml \
+    | kubectl apply -f -
+```
+
+Using Helm
+----------
+
+NGINX Ingress controller can be installed via [Helm](https://helm.sh/) using the chart [stable/nginx-ingress](https://github.com/kubernetes/charts/tree/master/stable/nginx-ingress) from the official charts repository.
+To install the chart with the release name `my-nginx`:
+
+``` console
+helm install stable/nginx-ingress --name my-nginx
+```
+
+If the kubernetes cluster has RBAC enabled, then run:
+
+``` console
+helm install stable/nginx-ingress --name my-nginx --set rbac.create=true
+```
+
+Verify installation
+-------------------
+
+To check if the ingress controller pods have started, run the following command:
+
+``` console
+kubectl get pods --all-namespaces -l app=ingress-nginx --watch
+```
+
+Once the ingress controller pods are running, you can cancel the above command by typing `Ctrl+C`.
+Now, you are ready to create your first ingress.
+
+Detect installed version
+------------------------
+
+To detect which version of the ingress controller is running, exec into the pod and run the `nginx-ingress-controller --version` command.
+
+``` console
+POD_NAMESPACE=ingress-nginx
+POD_NAME=$(kubectl get pods -n $POD_NAMESPACE -l app=ingress-nginx -o jsonpath={.items[0].metadata.name})
+kubectl exec -it $POD_NAME -n $POD_NAMESPACE -- /nginx-ingress-controller --version
+```
+
+Deploying the config-map
+------------------------
+
+A config map can be used to configure system components for the nginx-controller. In order to begin using a config-map,
+make sure it has been created and is being used in the deployment.
+
+It is created as seen in the [Mandatory Commands](#mandatory-commands) section above.
+
+``` console
+curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/configmap.yaml \
+    | kubectl apply -f -
+```
+
+and is set up to be used in the deployment [without-rbac](without-rbac.yaml) or [with-rbac](with-rbac.yaml) with the following line:
+
+``` yaml
+- --configmap=$(POD_NAMESPACE)/nginx-configuration
+```
+
+For information on using the config-map, see its [user-guide](../docs/user-guide/configmap.md).
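
The upstream guide above ships verbatim inside this patch; in Kubespray itself the controller is driven entirely by inventory variables rather than by these kubectl commands. A minimal sketch of a group_vars override, using only the variables this patch introduces (the values shown are illustrative, not new defaults):

``` yaml
# Hypothetical inventory excerpt - not part of this patch.
# Variable names come from the role defaults added below.
ingress_nginx_enabled: true               # render and apply the addon manifests
ingress_nginx_namespace: "ingress-nginx"  # namespace created by ingress-nginx-ns.yml.j2
ingress_nginx_insecure_port: 80           # hostPort for HTTP on every node
ingress_nginx_secure_port: 443            # hostPort for HTTPS on every node
```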
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml new file mode 100644 index 000000000..b38fc2b97 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml @@ -0,0 +1,10 @@ +--- +ingress_nginx_default_backend_image_repo: gcr.io/google_containers/defaultbackend +ingress_nginx_default_backend_image_tag: 1.4 + +ingress_nginx_controller_image_repo: quay.io/kubernetes-ingress-controller/nginx-ingress-controller +ingress_nginx_controller_image_tag: 0.11.0 + +ingress_nginx_namespace: "ingress-nginx" +ingress_nginx_insecure_port: 80 +ingress_nginx_secure_port: 443 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml new file mode 100644 index 000000000..0a37e94cd --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -0,0 +1,42 @@ +--- + +- name: NGINX Ingress Controller | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/ingress_nginx" + state: directory + owner: root + group: root + mode: 0755 + +- name: NGINX Ingress Controller | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.file }}" + with_items: + - { name: ingress-nginx-ns, file: ingress-nginx-ns.yml, type: ns } + - { name: ingress-nginx-sa, file: ingress-nginx-sa.yml, type: sa } + - { name: ingress-nginx-role, file: ingress-nginx-role.yml, type: role } + - { name: ingress-nginx-rolebinding, file: ingress-nginx-rolebinding.yml, type: rolebinding } + - { name: ingress-nginx-clusterrole, file: ingress-nginx-clusterrole.yml, type: clusterrole } + - { name: ingress-nginx-clusterrolebinding, file: ingress-nginx-clusterrolebinding.yml, type: clusterrolebinding } + - { name: ingress-nginx-cm, file: ingress-nginx-cm.yml, type: cm } + - { name: ingress-nginx-tcp-servicecs-cm, file: ingress-nginx-tcp-servicecs-cm.yml, type: cm } + - { name: ingress-nginx-udp-servicecs-cm, file: ingress-nginx-udp-servicecs-cm.yml, type: cm } + - { name: ingress-nginx-default-backend-svc, file: ingress-nginx-default-backend-svc.yml, type: svc } + - { name: ingress-nginx-default-backend-rs, file: ingress-nginx-default-backend-rs.yml, type: rs } + - { name: ingress-nginx-controller-ds, file: ingress-nginx-controller-ds.yml, type: ds } + register: ingress_nginx_manifests + when: + - inventory_hostname == groups['kube-master'][0] + +- name: NGINX Ingress Controller | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ ingress_nginx_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/ingress_nginx/{{ item.item.file }}" + state: "latest" + with_items: "{{ ingress_nginx_manifests.results }}" + when: + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 new file mode 100644 index 000000000..e6c36ef30 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrole.yml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +rules: + - apiGroups: [""] + resources: 
["configmaps", "endpoints", "nodes", "pods", "secrets"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + - apiGroups: ["extensions"] + resources: ["ingresses/status"] + verbs: ["update"] diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..8d14af4b7 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-clusterrolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +roleRef: + kind: ClusterRole + name: ingress-nginx + apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 new file mode 100644 index 000000000..4febe00f9 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 new file mode 100644 index 000000000..0f275bb55 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -0,0 +1,70 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ingress-nginx-controller + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx + version: v{{ ingress_nginx_controller_image_tag }} + annotations: + prometheus.io/port: '10254' + prometheus.io/scrape: 'true' +spec: + selector: + matchLabels: + k8s-app: ingress-nginx + version: v{{ ingress_nginx_controller_image_tag }} + template: + metadata: + labels: + k8s-app: ingress-nginx + version: v{{ ingress_nginx_controller_image_tag }} + spec: + serviceAccountName: ingress-nginx + containers: + - name: ingress-nginx-controller + image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/ingress-nginx-default-backend + - --configmap=$(POD_NAMESPACE)/ingress-nginx + - --tcp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-tcp-services + - --udp-services-configmap=$(POD_NAMESPACE)/ingress-nginx-udp-services + - --annotations-prefix=nginx.ingress.kubernetes.io + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - name: http + containerPort: 80 + hostPort: {{ ingress_nginx_insecure_port }} + - name: https + containerPort: 443 + hostPort: 
{{ ingress_nginx_secure_port }} + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 new file mode 100644 index 000000000..c0bed920b --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-rs.yml.j2 @@ -0,0 +1,37 @@ +--- +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: ingress-nginx-default-backend-v{{ ingress_nginx_default_backend_image_tag }} + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx-default-backend + version: v{{ ingress_nginx_default_backend_image_tag }} +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: ingress-nginx-default-backend + version: v{{ ingress_nginx_default_backend_image_tag }} + template: + metadata: + labels: + k8s-app: ingress-nginx-default-backend + version: v{{ ingress_nginx_default_backend_image_tag }} + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: ingress-nginx-default-backend + # Any image is permissible as long as: + # 1. It serves a 404 page at / + # 2. It serves 200 on a /healthz endpoint + image: {{ ingress_nginx_default_backend_image_repo }}:{{ ingress_nginx_default_backend_image_tag }} + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 new file mode 100644 index 000000000..ab23f3799 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-default-backend-svc.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: ingress-nginx-default-backend + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx-default-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + k8s-app: ingress-nginx-default-backend diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 new file mode 100644 index 000000000..75958948a --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ ingress_nginx_namespace }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 new file mode 100644 index 000000000..9254e035a --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-role.yml.j2 @@ -0,0 +1,24 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +rules: + - apiGroups: [""] + resources: ["configmaps", 
"pods", "secrets", "namespaces"] + verbs: ["get"] + - apiGroups: [""] + resources: ["configmaps"] + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + resourceNames: ["ingress-controller-leader-nginx"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get"] diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 new file mode 100644 index 000000000..a6a8dec4b --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-rolebinding.yml.j2 @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 new file mode 100644 index 000000000..55d6d6518 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-sa.yml.j2 @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx + namespace: {{ ingress_nginx_namespace }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 new file mode 100644 index 000000000..131127003 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx-tcp-services + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 new file mode 100644 index 000000000..fc2bd2a65 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx-udp-services + namespace: {{ ingress_nginx_namespace }} + labels: + k8s-app: ingress-nginx diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml new file mode 100644 index 000000000..da2e03ecc --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -0,0 +1,8 @@ +--- +dependencies: + - role: kubernetes-apps/ingress_controller/ingress_nginx + when: ingress_nginx_enabled + tags: + - apps + - ingress-nginx + - ingress-controller diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index eb718f7d4..8622d919f 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -173,6 +173,7 @@ enable_network_policy: false local_volume_provisioner_enabled: 
"{{ local_volumes_enabled | default('false') }}" persistent_volumes_enabled: false cephfs_provisioner_enabled: false +ingress_nginx_enabled: false ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) # openstack_blockstorage_version: "v1/v2/auto (default)" From 46ff9ce765247fe0021d7f93e5e894783caae2f2 Mon Sep 17 00:00:00 2001 From: Qasim Sarfraz Date: Fri, 2 Mar 2018 16:37:41 +0000 Subject: [PATCH 039/177] README.md update for default component versions --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e5bc77a61..7d592f851 100644 --- a/README.md +++ b/README.md @@ -77,13 +77,13 @@ Versions of supported components - [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.3 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 -- [flanneld](https://github.com/coreos/flannel/releases) v0.8.0 +- [flanneld](https://github.com/coreos/flannel/releases) v0.9.1 - [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [cilium](https://github.com/cilium/cilium) v1.0.0-rc4 - [contiv](https://github.com/contiv/install/releases) v1.0.3 - [weave](http://weave.works/) v2.0.1 -- [docker](https://www.docker.com/) v1.13 (see note) +- [docker](https://www.docker.com/) v17.03 (see note) - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2) Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin). 
From a800ed094b40aac9e73ac5115bf8506ef53c1db1 Mon Sep 17 00:00:00 2001 From: Jonas Kongslund Date: Sun, 21 Jan 2018 14:34:37 +0400 Subject: [PATCH 040/177] Added support for webhook authentication/authorization on the secure kubelet endpoint --- .../cluster_roles/tasks/main.yml | 46 +++++++++++++++++++ .../templates/node-webhook-cr.yml.j2 | 19 ++++++++ .../templates/node-webhook-crb.yml.j2 | 17 +++++++ .../node/templates/kubelet.kubeadm.env.j2 | 3 ++ .../node/templates/kubelet.standard.env.j2 | 6 +++ roles/kubespray-defaults/defaults/main.yaml | 6 +++ 6 files changed, 97 insertions(+) create mode 100644 roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 create mode 100644 roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 5bf670949..b58670c0f 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -29,6 +29,52 @@ - rbac_enabled - node_crb_manifest.changed +- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet + template: + src: "node-webhook-cr.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-cr.yml" + register: node_webhook_cr_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + tags: node-webhook + +- name: Apply webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{bin_dir}}/kubectl" + resource: "clusterrole" + filename: "{{ kube_config_dir }}/node-webhook-cr.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_cr_manifest.changed + tags: node-webhook + +- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole + template: + src: "node-webhook-crb.yml.j2" + dest: "{{ kube_config_dir }}/node-webhook-crb.yml" + register: node_webhook_crb_manifest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + tags: node-webhook + +- name: Grant system:nodes the webhook ClusterRole + kube: + name: "system:node-webhook" + kubectl: "{{bin_dir}}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/node-webhook-crb.yml" + state: latest + when: + - rbac_enabled + - kubelet_authorization_mode_webhook + - node_webhook_crb_manifest.changed + tags: node-webhook + # This is not a cluster role, but should be run after kubeconfig is set on master - name: Write kube system namespace manifest template: diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 new file mode 100644 index 000000000..3f32c2599 --- /dev/null +++ b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +rules: + - apiGroups: + - "" + resources: + - nodes/proxy + - nodes/stats + - nodes/log + - nodes/spec + - nodes/metrics + verbs: + - "*" \ No newline at end of file diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 new file mode 100644 index 000000000..68aed5cb5 --- /dev/null +++ 
b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-crb.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:node-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:node-webhook +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: system:nodes diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index e49b15f48..93297accd 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -20,6 +20,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% if kube_version | version_compare('v1.8', '<') %} --require-kubeconfig \ {% endif %} +{% if kubelet_authentication_token_webhook %} +--authentication-token-webhook \ +{% endif %} --authorization-mode=Webhook \ --client-ca-file={{ kube_cert_dir }}/ca.crt \ --pod-manifest-path={{ kube_manifest_dir }} \ diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index cb7d83d35..5c112c179 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -33,6 +33,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% else %} --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \ {% endif %} +{% if kubelet_authentication_token_webhook %} +--authentication-token-webhook \ +{% endif %} +{% if kubelet_authorization_mode_webhook %} +--authorization-mode=Webhook \ +{% endif %} --enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %} {# DNS settings for kubelet #} diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 8622d919f..692c2e698 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -197,6 +197,12 @@ openstack_lbaas_monitor_max_retries: "3" authorization_modes: ['Node', 'RBAC'] rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}" +# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelet’s HTTPS endpoint +kubelet_authentication_token_webhook: false + +# When enabled, access to the kubelet API requires authorization by delegation to the API server +kubelet_authorization_mode_webhook: false + ## List of key=value pairs that describe feature gates for ## the k8s cluster. 
kube_feature_gates:

From 585303ad664a7bc943bde15814d5adcaeb062295 Mon Sep 17 00:00:00 2001
From: Jonas Kongslund
Date: Sat, 3 Mar 2018 10:05:05 +0400
Subject: [PATCH 041/177] Start with three dashes for consistency

---
 .../cluster_roles/templates/node-webhook-cr.yml.j2 | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2
index 3f32c2599..0ac79d3e6 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/node-webhook-cr.yml.j2
@@ -1,3 +1,4 @@
+---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:

From dada98143c109a907590a1349f35f1d75631001c Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Mon, 5 Mar 2018 04:53:51 -0600
Subject: [PATCH 042/177] Fix some requires in the kubespray rpm spec file (#2417)

Fix some requires in the kubespray rpm spec file
---
 contrib/packaging/rpm/kubespray.spec | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec
index 7080cf998..6ec3ffca7 100644
--- a/contrib/packaging/rpm/kubespray.spec
+++ b/contrib/packaging/rpm/kubespray.spec
@@ -20,9 +20,10 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr
 
-Requires: ansible
+Requires: ansible >= 2.4.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
+Requires: python-pbr
 
 %description

From 89847d5684b9ea734333ae795d673e28de788885 Mon Sep 17 00:00:00 2001
From: Ayaz Ahmed Khan
Date: Wed, 12 Jul 2017 12:51:12 +0500
Subject: [PATCH 043/177] Explicitly define the --kubelet-preferred-address-types
 parameter in the API server configuration

This solves a problem where, if you have non-resolvable node names and
try to scale the cluster by adding new nodes, kubectl commands start to
fail for the newly added nodes, giving a TCP timeout error when the node
hostname is resolved against a public DNS.
---
 roles/kubernetes/master/defaults/main.yml | 3 +++
 .../master/templates/manifests/kube-apiserver.manifest.j2 | 1 +
 2 files changed, 4 insertions(+)

diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml
index 64a71fc22..59e528822 100644
--- a/roles/kubernetes/master/defaults/main.yml
+++ b/roles/kubernetes/master/defaults/main.yml
@@ -78,6 +78,9 @@ kube_oidc_auth: false
 
 ## Variables for custom flags
 apiserver_custom_flags: []
 
+# List of the preferred NodeAddressTypes to use for kubelet connections.
+kubelet_preferred_address_types: 'InternalDNS,InternalIP,Hostname,ExternalDNS,ExternalIP'
+
 controller_mgr_custom_flags: []
 
 scheduler_custom_flags: []
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index bee13b4ec..0dbe93cab 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -51,6 +51,7 @@ spec:
     - --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
     - --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
     - --service-account-lookup=true
+    - --kubelet-preferred-address-types={{ kubelet_preferred_address_types }}
 {% if kube_basic_auth|default(true) %}
     - --basic-auth-file={{ kube_users_dir }}/known_users.csv
 {% endif %}

From e65904eee372e31e5606b383d163dc518f594f73 Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Mon, 5 Mar 2018 23:11:18 +0800
Subject: [PATCH 044/177] Add labels for ingress_nginx_namespace, and only set
 serviceAccountName if rbac_enabled

---
 .../templates/ingress-nginx-controller-ds.yml.j2 | 4 +++-
 .../ingress_nginx/templates/ingress-nginx-ns.yml.j2 | 2 ++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
index 0f275bb55..7fd3a946c 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
@@ -21,7 +21,6 @@ spec:
         k8s-app: ingress-nginx
         version: v{{ ingress_nginx_controller_image_tag }}
     spec:
-      serviceAccountName: ingress-nginx
       containers:
         - name: ingress-nginx-controller
           image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }}
@@ -68,3 +67,6 @@ spec:
             periodSeconds: 10
             successThreshold: 1
             timeoutSeconds: 1
+{% if rbac_enabled %}
+      serviceAccountName: ingress-nginx
+{% endif %}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
index 75958948a..1f1236619 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-ns.yml.j2
@@ -3,3 +3,5 @@ apiVersion: v1
 kind: Namespace
 metadata:
   name: {{ ingress_nginx_namespace }}
+  labels:
+    name: {{ ingress_nginx_namespace }}

From 07657aecf47524f7a93ce6af7988babde272b3ed Mon Sep 17 00:00:00 2001
From: Michael Beatty
Date: Thu, 1 Mar 2018 10:41:19 -0600
Subject: [PATCH 045/177] add support for azure vnetResourceGroup

---
 inventory/sample/group_vars/all.yml | 1 +
 roles/kubernetes/preinstall/tasks/azure-credential-check.yml | 5 +++++
 roles/kubernetes/preinstall/templates/azure-cloud-config.j2 | 3 ++-
 3 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index c107b049f..282943a8d 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -76,6 +76,7 @@ bin_dir: /usr/local/bin
 #azure_subnet_name:
 #azure_security_group_name:
 #azure_vnet_name:
+#azure_vnet_resource_group: 
#azure_route_table_name:
 
 ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml
index fa2d82fd2..68cbaa160 100644
--- a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml
+++ b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml
@@ -44,6 +44,11 @@
     msg: "azure_vnet_name is missing"
   when: azure_vnet_name is not defined or azure_vnet_name == ""
 
+- name: check azure_vnet_resource_group value
+  fail:
+    msg: "azure_vnet_resource_group is missing"
+  when: azure_vnet_resource_group is not defined or azure_vnet_resource_group == ""
+
 - name: check azure_route_table_name value
   fail:
     msg: "azure_route_table_name is missing"
diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/preinstall/templates/azure-cloud-config.j2
index 139a06cc1..d33c044b2 100644
--- a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2
+++ b/roles/kubernetes/preinstall/templates/azure-cloud-config.j2
@@ -8,5 +8,6 @@
   "subnetName": "{{ azure_subnet_name }}",
   "securityGroupName": "{{ azure_security_group_name }}",
   "vnetName": "{{ azure_vnet_name }}",
+  "vnetResourceGroup": "{{ azure_vnet_resource_group }}",
   "routeTableName": "{{ azure_route_table_name }}"
-} \ No newline at end of file
+}

From f9019ab116428c24da9cf243c375789de6538b7e Mon Sep 17 00:00:00 2001
From: Dominic Lam
Date: Mon, 5 Mar 2018 13:15:10 -0800
Subject: [PATCH 046/177] Adding ssh_private_key_file to ProxyCommand

This matches what roles/bastion-ssh-config is trying to do. When the
setup goes through a bastion, we want the SSH private key to be used on
the bastion instance.
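
For context, a sketch of the inventory values the amended template below picks up; the user name and key path here are illustrative, only the variable names are real:

``` yaml
# Hypothetical group_vars/all.yml excerpt.
# With a host named "bastion" in the inventory, the ProxyCommand in the
# diff below now also passes this key to the bastion hop via "ssh -i".
ansible_user: ubuntu
ansible_ssh_private_key_file: ~/.ssh/kubespray.pem
```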
---
 roles/kubespray-defaults/defaults/main.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 498b14365..d24f279ec 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -5,7 +5,7 @@ bootstrap_os: none
 
 # Use proxycommand if bastion host is in group all
 # This change obsoletes editing the ansible.cfg file depending on bastion existence
-ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} ' {% endif %}"
+ansible_ssh_common_args: "{% if 'bastion' in groups['all'] %} -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -W %h:%p {{ ansible_user }}@{{hostvars['bastion']['ansible_host']}} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} ' {% endif %}"
 
 kube_api_anonymous_auth: false

From 388b627f722f462263e050a85aa516111a73d414 Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Mon, 5 Mar 2018 20:46:39 -0600
Subject: [PATCH 047/177] Enable OOM killing for etcd-events

Enable OOM killing, as already done for the etcd docker run.
---
 roles/etcd/templates/etcd-events.j2 | 1 -
 1 file changed, 1 deletion(-)

diff --git a/roles/etcd/templates/etcd-events.j2 b/roles/etcd/templates/etcd-events.j2
index 3b25eaaff..b26847960 100644
--- a/roles/etcd/templates/etcd-events.j2
+++ b/roles/etcd/templates/etcd-events.j2
@@ -9,7 +9,6 @@
 {% if etcd_memory_limit is defined %}
   --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \
 {% endif %}
-  --oom-kill-disable \
 {% if etcd_cpu_limit is defined %}
   --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \
 {% endif %}

From 646d473e8e75415ed6b1912207f7c0594c1acb9e Mon Sep 17 00:00:00 2001
From: zhengchuan hu
Date: Wed, 7 Mar 2018 18:30:34 +0800
Subject: [PATCH 048/177] fix the names of some variables

---
 roles/kubernetes/node/defaults/main.yml | 10 +++++-----
 roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 | 6 +++---
 .../kubernetes/node/templates/kubelet.standard.env.j2 | 6 +++---
 3 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml
index 7603ef6be..2cbf56e1d 100644
--- a/roles/kubernetes/node/defaults/main.yml
+++ b/roles/kubernetes/node/defaults/main.yml
@@ -35,12 +35,12 @@ kubelet_disable_shared_pid: true
 ### fail with swap on (default true)
 kubelet_fail_swap_on: true
 
-# Reserve this space for system resources
-kubelet_memory_limit: 256M
-kubelet_cpu_limit: 100m
+# Reserve this space for kube resources
+kube_memory_reserved: 256M
+kube_cpu_reserved: 100m
 # Reservation for master hosts
-kubelet_master_memory_limit: 512M
-kubelet_master_cpu_limit: 200m
+kube_master_memory_reserved: 512M
+kube_master_cpu_reserved: 200m
 
 kubelet_status_update_frequency: 10s
 
diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
index 93297accd..c8cf40e7b 100644
--- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2
@@ -44,9 +44,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {# Node reserved CPU/memory #}
 {% if is_kube_master|bool %}
-{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_master_cpu_limit }},memory={{ 
kubelet_master_memory_limit|regex_replace('Mi', 'M') }}{% endset %} +{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %} {% else %} -{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }}{% endset %} +{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %} {% endif %} {# DNS settings for kubelet #} @@ -62,7 +62,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_reserve }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kube_reserved }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %} diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 5c112c179..8e05e0253 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -73,9 +73,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {# Node reserved CPU/memory #} {% if is_kube_master|bool %} -{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_master_cpu_limit }},memory={{ kubelet_master_memory_limit|regex_replace('Mi', 'M') }}{% endset %} +{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %} {% else %} -{% set kubelet_reserve %}--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }}{% endset %} +{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %} {% endif %} {# Kubelet node labels #} @@ -88,7 +88,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} {% endif %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kubelet_reserve }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if 
kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}

From 0b1200bb4967ead9ab74c025e82395a1c99d3bbe Mon Sep 17 00:00:00 2001
From: zhengchuan hu
Date: Wed, 7 Mar 2018 19:00:00 +0800
Subject: [PATCH 049/177] add tip to large-deployments.md

Set ``etcd_events_cluster_setup: true`` to store events in a separate
dedicated etcd instance.
---
 docs/large-deployments.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/large-deployments.md b/docs/large-deployments.md
index 953ca2963..723ca5f48 100644
--- a/docs/large-deployments.md
+++ b/docs/large-deployments.md
@@ -46,5 +46,8 @@ For a large scaled deployments, consider the following configuration changes:
   section of the Getting started guide for tips on creating a large scale
   Ansible inventory.
 
+* Override ``etcd_events_cluster_setup: true`` to store events in a separate
+  dedicated etcd instance.
+
 For example, when deploying 200 nodes, you may want to run ansible with
 ``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.

From 954aae931e97e417e137e79248cd9e4b50acb3fa Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Wed, 7 Mar 2018 05:39:03 -0600
Subject: [PATCH 050/177] Fix issue #2246 (#2403)

Support CentOS/Fedora Atomic host
---
 README.md | 1 +
 docs/atomic.md | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 7d592f851..ec220c734 100644
--- a/README.md
+++ b/README.md
@@ -69,6 +69,7 @@ Supported Linux Distributions
 - **Debian** Jessie
 - **Ubuntu** 16.04
 - **CentOS/RHEL** 7
+- **Fedora/CentOS** Atomic
 
 Note: Upstart/SysV init based OS types are not supported.
 
diff --git a/docs/atomic.md b/docs/atomic.md
index cb506a9f3..1c432b8e8 100644
--- a/docs/atomic.md
+++ b/docs/atomic.md
@@ -7,7 +7,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
 
 ### Vagrant
 
-* For bootstrapping with Vagrant, use box centos/atomic-host
+* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
 * Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
 * Update `vm_memory = 2048` and `vm_cpus = 2`
 * Networking on vagrant hosts has to be brought up manually once they are booted.
@@ -17,6 +17,7 @@
   sudo /sbin/ifup enp0s8
 ```
 
-* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt, download the centos/atomic-host qcow2 image from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
+* For users of vagrant-libvirt, download the fedora/atomic-host qcow2 image from https://getfedora.org/en/atomic/download/
 
-Then you can proceed to [cluster deployment](#run-deployment)
\ No newline at end of file
+Then you can proceed to [cluster deployment](#run-deployment)

From dbf40bbbb80eb8221d6dc5dc2d553e774ae34213 Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Wed, 7 Mar 2018 06:11:20 -0600
Subject: [PATCH 051/177] docker-ce instead of docker-engine repo (#2423)

* Use docker-ce 17.03.2

* docker-engine may be discontinued
---
 roles/docker/defaults/main.yml | 14 ++++++++++----
 roles/docker/vars/debian.yml | 20 ++++++++++----------
 roles/docker/vars/redhat.yml | 20 +++++++++++++------
 roles/docker/vars/ubuntu.yml | 18 ++++++++----------
 4 files changed, 41 insertions(+), 31 deletions(-)

diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index e49471ccc..2bbd62835 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,5 +1,6 @@
 ---
 docker_version: '17.03'
+docker_selinux_version: '17.03'
 
 docker_package_info:
   pkgs:
@@ -14,7 +15,12 @@ docker_dns_servers_strict: yes
 
 docker_container_storage_setup: false
 
-docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
-docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
-docker_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
-docker_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg'
+#CentOS/RedHat docker-ce repo
+docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
+docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
+#Ubuntu docker-ce repo
+docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
+docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
+#Debian docker-ce repo
+docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
+docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml
index 587e910d6..0a43c7c79 100644
--- a/roles/docker/vars/debian.yml
+++ b/roles/docker/vars/debian.yml
@@ -1,15 +1,15 @@
 ---
 docker_kernel_min_version: '3.10'
 
-# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist
+# https://download.docker.com/linux/debian/
 docker_versioned_pkg:
-  'latest': docker-engine
+  'latest': docker-ce
   '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }}
   '1.12': docker-engine=1.12.6-0~debian-{{ ansible_distribution_release|lower }}
   '1.13': docker-engine=1.13.1-0~debian-{{ ansible_distribution_release|lower }}
-  '17.03': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'stable': docker-engine=17.03.1~ce-0~debian-{{ ansible_distribution_release|lower }}
-  'edge': docker-engine=17.05.0~ce-0~debian-{{ ansible_distribution_release|lower }}
+  '17.03': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'stable': docker-ce=17.03.2~ce-0~debian-{{ ansible_distribution_release|lower }}
+  'edge': docker-ce=17.12.1~ce-0~debian-{{ ansible_distribution_release|lower }}
 
 docker_package_info:
   pkg_mgr: apt
@@ -19,14 
+19,14 @@ docker_package_info: docker_repo_key_info: pkg_key: apt_key - url: '{{ docker_apt_repo_gpgkey }}' + url: '{{ docker_debian_repo_gpgkey }}' repo_keys: - - 58118E89F3A912897C070ADBF76221572C52609D + - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 docker_repo_info: pkg_repo: apt_repository repos: - > - deb {{ docker_apt_repo_base_url }} - {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }} - main + deb {{ docker_debian_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml index 23c5419a6..96950719e 100644 --- a/roles/docker/vars/redhat.yml +++ b/roles/docker/vars/redhat.yml @@ -1,23 +1,29 @@ --- docker_kernel_min_version: '0' -# https://yum.dockerproject.org/repo/main/centos/7/Packages/ +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/ # or do 'yum --showduplicates list docker-engine' docker_versioned_pkg: - 'latest': docker-engine + 'latest': docker-ce '1.11': docker-engine-1.11.2-1.el7.centos '1.12': docker-engine-1.12.6-1.el7.centos '1.13': docker-engine-1.13.1-1.el7.centos - '17.03': docker-engine-17.03.1.ce-1.el7.centos - 'stable': docker-engine-17.03.1.ce-1.el7.centos - 'edge': docker-engine-17.05.0.ce-1.el7.centos + '17.03': docker-ce-17.03.2.ce-1.el7.centos + 'stable': docker-ce-17.03.2.ce-1.el7.centos + 'edge': docker-ce-17.03.2.ce-1.el7.centos + +docker_selinux_versioned_pkg: + 'latest': docker-ce-selinux + '17.03': docker-ce-selinux-17.03.2.ce-1.el7.centos + 'stable': docker-ce-selinux-17.03.2.ce-1.el7.centos + 'edge': docker-ce-selinux-17.03.2.ce-1.el7.centos -# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package -# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/ docker_package_info: pkg_mgr: yum pkgs: + - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}" - name: "{{ docker_versioned_pkg[docker_version | string] }}" docker_repo_key_info: diff --git a/roles/docker/vars/ubuntu.yml b/roles/docker/vars/ubuntu.yml index f11f5bb81..897c23ce0 100644 --- a/roles/docker/vars/ubuntu.yml +++ b/roles/docker/vars/ubuntu.yml @@ -1,15 +1,13 @@ --- docker_kernel_min_version: '3.10' -# https://apt.dockerproject.org/repo/dists/ubuntu-xenial/main/filelist +# https://download.docker.com/linux/ubuntu/ docker_versioned_pkg: - 'latest': docker-engine + 'latest': docker-ce '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }} '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }} '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }} - '17.03': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }} - 'stable': docker-engine=17.03.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }} - 'edge': docker-engine=17.05.0~ce-0~ubuntu-{{ ansible_distribution_release|lower }} + '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }} docker_package_info: pkg_mgr: apt @@ -19,14 +17,14 @@ docker_package_info: docker_repo_key_info: pkg_key: apt_key - url: '{{ docker_apt_repo_gpgkey }}' + url: '{{ docker_ubuntu_repo_gpgkey }}' repo_keys: - - 58118E89F3A912897C070ADBF76221572C52609D + - 9DC858229FC7DD38854AE2D88D81803C0EBFCD88 docker_repo_info: pkg_repo: apt_repository repos: - > - deb {{ docker_apt_repo_base_url }} - {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }} - main + deb {{ 
docker_ubuntu_repo_base_url }} + {{ ansible_distribution_release|lower }} + stable From 9e44f94176c3248af98524913e152279b12d043e Mon Sep 17 00:00:00 2001 From: Antoine Legrand <2t.antoine@gmail.com> Date: Wed, 7 Mar 2018 13:37:07 +0100 Subject: [PATCH 052/177] Remove DigitalOcean --- .gitlab-ci.yml | 21 ++++++++++----------- README.md | 2 +- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3d49fd26d..c21bb0c43 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -320,16 +320,6 @@ gce_coreos-calico-aio: only: [/^pr-.*$/] ### PR JOBS PART2 -do_ubuntu-canal-ha: - stage: deploy-part2 - <<: *job - <<: *do - variables: - <<: *do_variables - when: on_success - except: ['triggers'] - only: [/^pr-.*$/] - gce_centos7-flannel-addons: stage: deploy-part2 <<: *job @@ -363,7 +353,6 @@ gce_coreos-calico-sep-triggers: when: on_success only: ['triggers'] - gce_ubuntu-canal-ha-triggers: stage: deploy-part2 <<: *job @@ -396,6 +385,16 @@ gce_ubuntu-weave-sep-triggers: only: ['triggers'] # More builds for PRs/merges (manual) and triggers (auto) +do_ubuntu-canal-ha: + stage: deploy-part2 + <<: *job + <<: *do + variables: + <<: *do_variables + when: manual + except: ['triggers'] + only: ['master', /^pr-.*$/] + gce_ubuntu-canal-ha: stage: deploy-part2 <<: *job diff --git a/README.md b/README.md index 7d592f851..dc15d2099 100644 --- a/README.md +++ b/README.md @@ -150,5 +150,5 @@ CI Tests [![Build graphs](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/badges/master/build.svg)](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines) -CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack). +CI/end-to-end tests sponsored by Google (GCE) See the [test matrix](docs/test_cases.md) for details. 
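A hedged sketch of pinning the Docker version against the docker-ce packaging introduced in PATCH 051 above — the variable names and the '17.03' → docker-ce 17.03.2 mapping are taken from the role files in that diff; putting the overrides in a group_vars file is an assumption:

```yaml
# inventory/mycluster/group_vars/all.yml -- assumed location.
# '17.03' resolves to the docker-ce 17.03.2 packages via docker_versioned_pkg;
# docker_selinux_version is consumed only by the RedHat-family vars file.
docker_version: '17.03'
docker_selinux_version: '17.03'
```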
From 3f96b2da7a3af41419f724476ec67defee43e00e Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Wed, 7 Mar 2018 01:18:42 +0800 Subject: [PATCH 053/177] Add Custom ConfigMap Support for ingress-nginx --- inventory/sample/group_vars/k8s-cluster.yml | 7 +++++++ .../ingress_controller/ingress_nginx/defaults/main.yml | 3 +++ .../ingress_nginx/templates/ingress-nginx-cm.yml.j2 | 2 ++ .../templates/ingress-nginx-tcp-servicecs-cm.yml.j2 | 2 ++ .../templates/ingress-nginx-udp-servicecs-cm.yml.j2 | 2 ++ 5 files changed, 16 insertions(+) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index c5047acd1..a31963f16 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -192,6 +192,13 @@ ingress_nginx_enabled: false # ingress_nginx_namespace: "ingress-nginx" # ingress_nginx_insecure_port: 80 # ingress_nginx_secure_port: 443 +# ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "SSLv2" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/kube-dns:53" # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now ) persistent_volumes_enabled: false diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml index b38fc2b97..dce234f6c 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml @@ -8,3 +8,6 @@ ingress_nginx_controller_image_tag: 0.11.0 ingress_nginx_namespace: "ingress-nginx" ingress_nginx_insecure_port: 80 ingress_nginx_secure_port: 443 +ingress_nginx_configmap: {} +ingress_nginx_configmap_tcp_services: {} +ingress_nginx_configmap_udp_services: {} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 index 4febe00f9..79b9e17e7 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 @@ -6,3 +6,5 @@ metadata: namespace: {{ ingress_nginx_namespace }} labels: k8s-app: ingress-nginx +data: + {{ ingress_nginx_configmap | to_nice_yaml }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 index 131127003..5fb875940 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 @@ -6,3 +6,5 @@ metadata: namespace: {{ ingress_nginx_namespace }} labels: k8s-app: ingress-nginx +data: + {{ ingress_nginx_configmap_tcp_services | to_nice_yaml }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 index fc2bd2a65..bcb004bc9 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 +++ 
b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 @@ -6,3 +6,5 @@ metadata: namespace: {{ ingress_nginx_namespace }} labels: k8s-app: ingress-nginx +data: + {{ ingress_nginx_configmap_udp_services | to_nice_yaml }} From 605738757d3abfc02ecf3755b43be3c94d5aee97 Mon Sep 17 00:00:00 2001 From: Chris Mildebrandt Date: Wed, 7 Mar 2018 11:32:47 -0800 Subject: [PATCH 054/177] Fix systemd version detection Change "command" to "shell" in order for the pipe to work correctly --- roles/docker/tasks/systemd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index 877de1299..78cec33cc 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -12,7 +12,7 @@ when: http_proxy is defined or https_proxy is defined - name: get systemd version - command: systemctl --version | head -n 1 | cut -d " " -f 2 + shell: systemctl --version | head -n 1 | cut -d " " -f 2 register: systemd_version when: not is_atomic changed_when: false From 8960d5bcfad38e588addd1fdf81babc8d08a928a Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Wed, 7 Mar 2018 14:50:09 -0600 Subject: [PATCH 055/177] kube Module Enhancements * Multiple files are now supported across operations. * Can be specified as a list or a comma separated string. * Single item per task params will still work without changes. * Added `files`, `filenames`, and `file`, as aliases for the `filename` param. * Improved output of error message to always include stderr * `exists` now supports checking files Follow up PRs encouraged across roles to start converting `with_items` loops on `kube` tasks into `files` param lists so we can improve performance. --- library/kube.py | 56 +++++++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/library/kube.py b/library/kube.py index a84578ff0..0a50c4303 100644 --- a/library/kube.py +++ b/library/kube.py @@ -18,7 +18,9 @@ options: required: false default: null description: - - The path and filename of the resource(s) definition file. + - The path and filename of the resource(s) definition file(s). + - To operate on several files this can accept a comma separated list of files or a list of files. 
+ aliases: [ 'files', 'file', 'filenames' ] kubectl: required: false default: null @@ -86,6 +88,15 @@ EXAMPLES = """ - name: test nginx is present kube: filename=/tmp/nginx.yml + +- name: test nginx and postgresql are present + kube: files=/tmp/nginx.yml,/tmp/postgresql.yml + +- name: test nginx and postgresql are present + kube: + files: + - /tmp/nginx.yml + - /tmp/postgresql.yml """ @@ -112,7 +123,7 @@ class KubeManager(object): self.all = module.params.get('all') self.force = module.params.get('force') self.name = module.params.get('name') - self.filename = module.params.get('filename') + self.filename = [f.strip() for f in module.params.get('filename') or []] self.resource = module.params.get('resource') self.label = module.params.get('label') @@ -122,7 +133,7 @@ class KubeManager(object): rc, out, err = self.module.run_command(args) if rc != 0: self.module.fail_json( - msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err)) + msg='error running kubectl (%s) command (rc=%d), out=\'%s\', err=\'%s\'' % (' '.join(args), rc, out, err)) except Exception as exc: self.module.fail_json( msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc))) @@ -147,7 +158,7 @@ class KubeManager(object): if not self.filename: self.module.fail_json(msg='filename required to create') - cmd.append('--filename=' + self.filename) + cmd.append('--filename=' + ','.join(self.filename)) return self._execute(cmd) @@ -161,7 +172,7 @@ class KubeManager(object): if not self.filename: self.module.fail_json(msg='filename required to reload') - cmd.append('--filename=' + self.filename) + cmd.append('--filename=' + ','.join(self.filename)) return self._execute(cmd) @@ -173,7 +184,7 @@ class KubeManager(object): cmd = ['delete'] if self.filename: - cmd.append('--filename=' + self.filename) + cmd.append('--filename=' + ','.join(self.filename)) else: if not self.resource: self.module.fail_json(msg='resource required to delete without filename') @@ -197,27 +208,31 @@ class KubeManager(object): def exists(self): cmd = ['get'] - if not self.resource: - return False + if self.filename: + cmd.append('--filename=' + ','.join(self.filename)) + else: + if not self.resource: + self.module.fail_json(msg='resource required without filename') - cmd.append(self.resource) + cmd.append(self.resource) - if self.name: - cmd.append(self.name) + if self.name: + cmd.append(self.name) + + if self.label: + cmd.append('--selector=' + self.label) + + if self.all: + cmd.append('--all-namespaces') cmd.append('--no-headers') - if self.label: - cmd.append('--selector=' + self.label) - - if self.all: - cmd.append('--all-namespaces') - result = self._execute_nofail(cmd) if not result: return False return True + # TODO: This is currently unused, perhaps convert to 'scale' with a replicas param? 
def stop(self): if not self.force and not self.exists(): @@ -226,7 +241,7 @@ class KubeManager(object): cmd = ['stop'] if self.filename: - cmd.append('--filename=' + self.filename) + cmd.append('--filename=' + ','.join(self.filename)) else: if not self.resource: self.module.fail_json(msg='resource required to stop without filename') @@ -253,7 +268,7 @@ def main(): module = AnsibleModule( argument_spec=dict( name=dict(), - filename=dict(), + filename=dict(type='list', aliases=['files', 'file', 'filenames']), namespace=dict(), resource=dict(), label=dict(), @@ -263,7 +278,8 @@ def main(): all=dict(default=False, type='bool'), log_level=dict(default=0, type='int'), state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']), - ) + ), + mutually_exclusive=[['filename', 'list']] ) changed = False From 64020040180556c9b835dfb18b54cbd5b5ba0a17 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Wed, 7 Mar 2018 01:00:01 +0800 Subject: [PATCH 056/177] FIXUP #2424: local_provisioner directory should be created only if enabled --- inventory/sample/group_vars/k8s-cluster.yml | 4 ++-- .../templates/local-volume-provisioner-ns.yml.j2 | 2 ++ roles/kubernetes/node/templates/kubelet-container.j2 | 3 --- roles/kubernetes/node/templates/kubelet.rkt.service.j2 | 6 ------ roles/kubernetes/preinstall/tasks/main.yml | 1 - 5 files changed, 4 insertions(+), 12 deletions(-) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index a31963f16..128e8cc99 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -171,8 +171,8 @@ registry_enabled: false # Local volume provisioner deployment local_volume_provisioner_enabled: false # local_volume_provisioner_namespace: "{{ system_namespace }}" -local_volume_provisioner_base_dir: /mnt/disks -local_volume_provisioner_mount_dir: /mnt/disks +# local_volume_provisioner_base_dir: /mnt/disks +# local_volume_provisioner_mount_dir: /mnt/disks # local_volume_provisioner_storage_class: local-storage # CephFS provisioner deployment diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 index 68faacfbc..04a791010 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ns.yml.j2 @@ -3,3 +3,5 @@ apiVersion: v1 kind: Namespace metadata: name: {{ local_volume_provisioner_namespace }} + labels: + name: {{ local_volume_provisioner_namespace }} diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 28a109ec1..4e8d4c371 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -26,9 +26,6 @@ -v /var/run:/var/run:rw \ -v {{kube_config_dir}}:{{kube_config_dir}}:ro \ -v /etc/os-release:/etc/os-release:ro \ -{% if local_volume_provisioner_enabled == true %} - -v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:shared \ -{% endif %} {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \ ./hyperkube kubelet \ "$@" diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 
b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 80825fab3..7e0c2f942 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -36,9 +36,6 @@ ExecStart=/usr/bin/rkt run \ --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \ --volume var-log,kind=host,source=/var/log \ -{% if local_volume_provisioner_enabled == true %} - --volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false,recursive=true \ -{% endif %} {% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"] %} --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \ @@ -67,9 +64,6 @@ ExecStart=/usr/bin/rkt run \ --mount volume=var-lib-kubelet,target=/var/lib/kubelet \ --mount volume=var-log,target=/var/log \ --mount volume=hosts,target=/etc/hosts \ -{% if local_volume_provisioner_enabled == true %} - --mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \ -{% endif %} {% if kubelet_flexvolumes_plugins_dir is defined %} --mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \ {% endif %} diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 24e839806..f23040751 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -60,7 +60,6 @@ - "{{ kube_config_dir }}/ssl" - "{{ kube_manifest_dir }}" - "{{ kube_script_dir }}" - - "{{ local_volume_provisioner_base_dir }}" - name: check cloud_provider value fail: From a086686e9f9695b55871eecffc99dfd9c16e1e15 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Fri, 16 Feb 2018 20:53:35 +0800 Subject: [PATCH 057/177] Support multiple artifacts under individual inventory directory --- .gitignore | 2 +- docs/getting-started.md | 70 ++++++++++----------- inventory/sample/group_vars/k8s-cluster.yml | 4 +- roles/kubernetes/client/defaults/main.yml | 2 +- roles/kubernetes/client/tasks/main.yml | 16 ++++- roles/kubespray-defaults/defaults/main.yaml | 4 +- 6 files changed, 56 insertions(+), 42 deletions(-) diff --git a/.gitignore b/.gitignore index 66c9b4867..fcbcd1da1 100644 --- a/.gitignore +++ b/.gitignore @@ -23,7 +23,7 @@ __pycache__/ # Distribution / packaging .Python -artifacts/ +inventory/*/artifacts/ env/ build/ credentials/ diff --git a/docs/getting-started.md b/docs/getting-started.md index 961d1a9cf..78d3f49d1 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -18,11 +18,9 @@ certain threshold. 
Run `python3 contrib/inventory_builder/inventory.py help` hel Example inventory generator usage: -``` -cp -r inventory/sample inventory/mycluster -declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) -CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]} -``` + cp -r inventory/sample inventory/mycluster + declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5) + CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]} Starting custom deployment -------------------------- @@ -30,12 +28,10 @@ Starting custom deployment Once you have an inventory, you may want to customize deployment data vars and start the deployment: -**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars** +**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars: -``` -ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \ - --private-key=~/.ssh/private_key -``` + ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \ + --private-key=~/.ssh/private_key See more details in the [ansible guide](ansible.md). @@ -44,31 +40,31 @@ Adding nodes You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters. -- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)). -- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`: -``` -ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \ - --private-key=~/.ssh/private_key -``` +- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)). +- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`: + + ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \ + --private-key=~/.ssh/private_key Connecting to Kubernetes ------------------------ + By default, Kubespray configures kube-master hosts with insecure access to kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, -because kubectl will use http://localhost:8080 to connect. The kubeconfig files +because kubectl will use <http://localhost:8080> to connect. The kubeconfig files generated will point to localhost (on kube-masters) and kube-node hosts will connect either to a localhost nginx proxy or to a loadbalancer if configured. More details on this process are in the [HA guide](ha-mode.md). -Kubespray permits connecting to the cluster remotely on any IP of any -kube-master host on port 6443 by default. However, this requires -authentication. One could generate a kubeconfig based on one installed +Kubespray permits connecting to the cluster remotely on any IP of any +kube-master host on port 6443 by default. However, this requires +authentication. One could generate a kubeconfig based on one installed kube-master hosts (needs improvement) or connect with a username and password. By default, a user with admin rights is created, named `kube`. -The password can be viewed after deployment by looking at the file +The password can be viewed after deployment by looking at the file `PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated password.
If you wish to set your own password, just precreate/modify this -file yourself. +file yourself. For more information on kubeconfig and accessing a Kubernetes cluster, refer to the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). @@ -77,29 +73,33 @@ Accessing Kubernetes Dashboard ------------------------------ As of kubernetes-dashboard v1.7.x: -* New login options that use apiserver auth proxying of token/basic/kubeconfig by default -* Requires RBAC in authorization_modes -* Only serves over https -* No longer available at https://first_master:6443/ui until apiserver is updated with the https proxy URL + +- New login options that use apiserver auth proxying of token/basic/kubeconfig by default +- Requires RBAC in authorization\_modes +- Only serves over https +- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials: -https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login +<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login> Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from: -http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login +<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login> -It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above +It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above> Accessing Kubernetes API ------------------------ The main client of Kubernetes is `kubectl`. It is installed on each kube-master host and can optionally be configured on your ansible host by setting `kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration: - If `kubectl_localhost` is enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` is also created for use with the `admin.conf` below. - If `kubeconfig_localhost` is enabled, `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment. You can see a list of nodes by running the following commands: cd inventory/mycluster/artifacts ./kubectl.sh get nodes If desired, copy admin.conf to ~/.kube/config.
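A minimal sketch of turning on the artifact behaviour that PATCH 057 documents above — both variables appear in this patch (their defaults follow in the next file diff); the group_vars path mirrors the sample-inventory convention and is otherwise an assumption:

```yaml
# inventory/mycluster/group_vars/k8s-cluster.yml -- assumed location.
# With both set, admin.conf and the kubectl.sh wrapper are written to
# inventory/mycluster/artifacts/ after the playbook run.
kubeconfig_localhost: true
kubectl_localhost: true
```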
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index a31963f16..d642646fe 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -203,9 +203,9 @@ ingress_nginx_enabled: false # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now ) persistent_volumes_enabled: false -# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts # kubeconfig_localhost: false -# Download kubectl onto the host that runs Ansible in GITDIR/artifacts +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} # kubectl_localhost: false # dnsmasq diff --git a/roles/kubernetes/client/defaults/main.yml b/roles/kubernetes/client/defaults/main.yml index 5864e991f..32870df01 100644 --- a/roles/kubernetes/client/defaults/main.yml +++ b/roles/kubernetes/client/defaults/main.yml @@ -1,7 +1,7 @@ --- kubeconfig_localhost: false kubectl_localhost: false -artifacts_dir: "./artifacts" +artifacts_dir: "{{ inventory_dir }}/artifacts" kube_config_dir: "/etc/kubernetes" kube_apiserver_port: "6443" diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml index 3b66c5e1c..cf70b4995 100644 --- a/roles/kubernetes/client/tasks/main.yml +++ b/roles/kubernetes/client/tasks/main.yml @@ -55,9 +55,23 @@ - name: Copy kubectl binary to ansible host fetch: src: "{{ bin_dir }}/kubectl" - dest: "{{ artifacts_dir }}/kubectl" + dest: "{{ bin_dir }}/kubectl" flat: yes validate_checksum: no become: no run_once: yes when: kubectl_localhost|default(false) + +- name: create helper script kubectl.sh on ansible host + copy: + content: | + #!/bin/bash + kubectl --kubeconfig=admin.conf $@ + dest: "{{ artifacts_dir }}/kubectl.sh" + owner: root + group: root + mode: 0755 + become: no + run_once: yes + delegate_to: localhost + when: kubectl_localhost|default(false) and kubeconfig_localhost|default(false) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 61f11e97f..21fb044ec 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -152,9 +152,9 @@ helm_deployment_type: host # Enable kubeadm deployment (experimental) kubeadm_enabled: false -# Make a copy of kubeconfig on the host that runs Ansible in GITDIR/artifacts +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts kubeconfig_localhost: false -# Download kubectl onto the host that runs Ansible in GITDIR/artifacts +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} kubectl_localhost: false # K8s image pull policy (imagePullPolicy) From 12c78e622bc66ae2ef2aa0a890d8e1274a7eb4c0 Mon Sep 17 00:00:00 2001 From: "rong.zhang" Date: Mon, 5 Mar 2018 15:02:47 +0800 Subject: [PATCH 058/177] Remove nodes Drain node except daemonsets resource Use reset cluser for delete deploy data Then delete node --- remove-node.yml | 29 +++++++++++++++++++ roles/remove-node/post-remove/tasks/main.yml | 8 +++++ .../remove-node/pre-remove/defaults/main.yml | 3 ++ roles/remove-node/pre-remove/tasks/main.yml | 15 ++++++++++ 4 files changed, 55 insertions(+) create mode 100644 remove-node.yml create mode 100644 roles/remove-node/post-remove/tasks/main.yml create mode 100644 roles/remove-node/pre-remove/defaults/main.yml create mode 100644 
roles/remove-node/pre-remove/tasks/main.yml diff --git a/remove-node.yml b/remove-node.yml new file mode 100644 index 000000000..fbc5bc8ba --- /dev/null +++ b/remove-node.yml @@ -0,0 +1,29 @@ +--- + +- hosts: all + gather_facts: true + +- hosts: etcd:k8s-cluster:vault:calico-rr + vars_prompt: + name: "delete_nodes_confirmation" + prompt: "Are you sure you want to delete nodes state? Type 'yes' to delete nodes." + default: "no" + private: no + + pre_tasks: + - name: check confirmation + fail: + msg: "Delete nodes confirmation failed" + when: delete_nodes_confirmation != "yes" + +- hosts: kube-master + roles: + - { role: remove-node/pre-remove, tags: pre-remove } + +- hosts: kube-node + roles: + - { role: reset, tags: reset } + +- hosts: kube-master + roles: + - { role: remove-node/post-remove, tags: post-remove } diff --git a/roles/remove-node/post-remove/tasks/main.yml b/roles/remove-node/post-remove/tasks/main.yml new file mode 100644 index 000000000..395f9986b --- /dev/null +++ b/roles/remove-node/post-remove/tasks/main.yml @@ -0,0 +1,8 @@ +--- + +- name: Delete node + command: kubectl delete node {{ item }} + with_items: + - "{{ groups['kube-node'] }}" + delegate_to: "{{ groups['kube-master'][0] }}" + ignore_errors: yes diff --git a/roles/remove-node/pre-remove/defaults/main.yml b/roles/remove-node/pre-remove/defaults/main.yml new file mode 100644 index 000000000..e9e1ba28d --- /dev/null +++ b/roles/remove-node/pre-remove/defaults/main.yml @@ -0,0 +1,3 @@ +--- +drain_grace_period: 300 +drain_timeout: 360s diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml new file mode 100644 index 000000000..12091917a --- /dev/null +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: remove-node | Drain node except daemonsets resource + command: >- + {{ bin_dir }}/kubectl drain + --force + --ignore-daemonsets + --grace-period {{ drain_grace_period }} + --timeout {{ drain_timeout }} + --delete-local-data {{ item }} + with_items: + - "{{ groups['kube-node'] }}" + failed_when: false + delegate_to: "{{ groups['kube-master'][0] }}" + ignore_errors: yes From 96a92503cb27545547bcaf7b98d243f93e7d00fc Mon Sep 17 00:00:00 2001 From: zhengchuan hu Date: Thu, 8 Mar 2018 17:04:16 +0800 Subject: [PATCH 059/177] Fix always download calico_rr image --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index f18dad0e8..d87f4b923 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -220,7 +220,7 @@ downloads: tag: "{{ calico_policy_image_tag }}" sha256: "{{ calico_policy_digest_checksum|default(None) }}" calico_rr: - enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr}} and kube_network_plugin == 'calico'" + enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}" container: true repo: "{{ calico_rr_image_repo }}" tag: "{{ calico_rr_image_tag }}" From 8e36ad09b478c2f4fe302ba70ec926c5f5268b24 Mon Sep 17 00:00:00 2001 From: zhengchuan hu Date: Thu, 8 Mar 2018 23:16:02 +0800 Subject: [PATCH 060/177] clean http-proxy.conf --- roles/reset/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 9b87a1a13..9ae683df3 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -34,6 +34,7 @@ with_items: - docker-dns.conf - docker-options.conf + - 
http-proxy.conf register: docker_dropins_removed tags: - docker From b0ab92c9216b54a71a42ee772a0812d2fba52889 Mon Sep 17 00:00:00 2001 From: chadswen Date: Thu, 8 Mar 2018 23:56:46 -0600 Subject: [PATCH 061/177] Prefix system:node CRB Change the name of `system:node` CRB to `kubespray:system:node` to avoid conflicts with the auto-reconciled CRB also named `system:node` Fixes #2121 --- roles/kubernetes-apps/cluster_roles/tasks/main.yml | 2 +- roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index b58670c0f..3f696a9fe 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -20,7 +20,7 @@ - name: Apply workaround to allow all nodes with cert O=system:nodes to register kube: - name: "system:node" + name: "kubespray:system:node" kubectl: "{{bin_dir}}/kubectl" resource: "clusterrolebinding" filename: "{{ kube_config_dir }}/node-crb.yml" diff --git a/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 index 98e82dff7..9a4a3c46e 100644 --- a/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 +++ b/roles/kubernetes-apps/cluster_roles/templates/node-crb.yml.j2 @@ -6,7 +6,7 @@ metadata: rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults - name: system:node + name: kubespray:system:node roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole From cd153a1fb38d8ab66c9651438685f37e0642959d Mon Sep 17 00:00:00 2001 From: chadswen Date: Fri, 9 Mar 2018 00:11:10 -0600 Subject: [PATCH 062/177] Fix kubernetes cert permission sync Add `state: directory` to `file` task so that `recurse: yes` will actually take effect and ensure certs/keys have the right file mode and owner --- roles/kubernetes/secrets/tasks/gen_certs_script.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index c1dfeb394..011575358 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -179,6 +179,7 @@ file: path: "{{ kube_cert_dir }}" group: "{{ kube_cert_group }}" + state: directory owner: kube mode: "u=rwX,g-rwx,o-rwx" recurse: yes From 196995a1a75f768d52558459bd4aff76ed559454 Mon Sep 17 00:00:00 2001 From: "rong.zhang" Date: Mon, 12 Mar 2018 11:05:12 +0800 Subject: [PATCH 063/177] Fix issues#2451 Support docker-ce and docker-engine Support docker-ce and docker-engine include redhat/centos ubuntu debian --- roles/docker/defaults/main.yml | 11 +++++++++ roles/docker/tasks/main.yml | 29 ++++++++++++++++++++---- roles/docker/templates/rh_docker.repo.j2 | 12 ++++++++-- roles/docker/vars/debian.yml | 15 ++++++++++++ roles/docker/vars/redhat.yml | 6 ++++- roles/docker/vars/ubuntu.yml | 18 ++++++++++++++- 6 files changed, 83 insertions(+), 8 deletions(-) diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index 2bbd62835..df7b97ab4 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -11,6 +11,12 @@ docker_repo_key_info: docker_repo_info: repos: +dockerproject_repo_key_info: + repo_keys: + +dockerproject_repo_info: + repos: + docker_dns_servers_strict: yes docker_container_storage_setup: false @@ -24,3 +30,8 @@ 
docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg' #Debian docker-ce repo docker_debian_repo_base_url: "https://download.docker.com/linux/debian" docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg' +#dockerproject repo +dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7' +dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg' +dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo' +dockerproject_apt_repo_gpgkey: 'https://apt.dockerproject.org/gpg' diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 1c95f819f..80b917114 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -30,7 +30,7 @@ tags: - facts -- name: ensure docker repository public key is installed +- name: ensure docker-ce repository public key is installed action: "{{ docker_repo_key_info.pkg_key }}" args: id: "{{item}}" @@ -41,15 +41,36 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" with_items: "{{ docker_repo_key_info.repo_keys }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) -- name: ensure docker repository is enabled +- name: ensure docker-ce repository is enabled action: "{{ docker_repo_info.pkg_repo }}" args: repo: "{{item}}" state: present with_items: "{{ docker_repo_info.repos }}" - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0) + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (docker_repo_info.repos|length > 0) + +- name: ensure docker-engine repository public key is installed + action: "{{ dockerproject_repo_key_info.pkg_key }}" + args: + id: "{{item}}" + url: "{{dockerproject_repo_key_info.url}}" + state: present + register: keyserver_task_result + until: keyserver_task_result|succeeded + retries: 4 + delay: "{{ retry_stagger | random + 3 }}" + with_items: "{{ dockerproject_repo_key_info.repo_keys }}" + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) + +- name: ensure docker-engine repository is enabled + action: "{{ dockerproject_repo_info.pkg_repo }}" + args: + repo: "{{item}}" + state: present + with_items: "{{ dockerproject_repo_info.repos }}" + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat"] or is_atomic) and (dockerproject_repo_info.repos|length > 0) - name: Configure docker repository on RedHat/CentOS template: diff --git a/roles/docker/templates/rh_docker.repo.j2 b/roles/docker/templates/rh_docker.repo.j2 index 7cb728625..fe2aeac1c 100644 --- a/roles/docker/templates/rh_docker.repo.j2 +++ b/roles/docker/templates/rh_docker.repo.j2 @@ -1,7 +1,15 @@ -[dockerrepo] -name=Docker Repository +[docker-ce] +name=Docker-CE Repository baseurl={{ docker_rh_repo_base_url }} enabled=1 gpgcheck=1 gpgkey={{ docker_rh_repo_gpgkey }} {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} + +[docker-engine] +name=Docker-Engine Repository +baseurl={{ dockerproject_rh_repo_base_url }} +enabled=1 +gpgcheck=1 +gpgkey={{ dockerproject_rh_repo_gpgkey }} +{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml index 0a43c7c79..a17cd7575 100644 --- a/roles/docker/vars/debian.yml +++ b/roles/docker/vars/debian.yml @@ -2,6 +2,7 @@ 
docker_kernel_min_version: '3.10' # https://download.docker.com/linux/debian/ +# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist docker_versioned_pkg: 'latest': docker-ce '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }} @@ -30,3 +31,17 @@ docker_repo_info: deb {{ docker_debian_repo_base_url }} {{ ansible_distribution_release|lower }} stable + +dockerproject_repo_key_info: + pkg_key: apt_key + url: '{{ dockerproject_apt_repo_gpgkey }}' + repo_keys: + - 58118E89F3A912897C070ADBF76221572C52609D + +dockerproject_repo_info: + pkg_repo: apt_repository + repos: + - > + deb {{ dockerproject_apt_repo_base_url }} + {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }} + main diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml index 96950719e..39ba211d8 100644 --- a/roles/docker/vars/redhat.yml +++ b/roles/docker/vars/redhat.yml @@ -3,6 +3,7 @@ docker_kernel_min_version: '0' # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://download.docker.com/linux/centos/7/x86_64/stable/Packages/ +# https://yum.dockerproject.org/repo/main/centos/7 # or do 'yum --showduplicates list docker-engine' docker_versioned_pkg: 'latest': docker-ce @@ -11,10 +12,13 @@ docker_versioned_pkg: '1.13': docker-engine-1.13.1-1.el7.centos '17.03': docker-ce-17.03.2.ce-1.el7.centos 'stable': docker-ce-17.03.2.ce-1.el7.centos - 'edge': docker-ce-17.03.2.ce-1.el7.centos + 'edge': docker-ce-17.12.1.ce-1.el7.centos docker_selinux_versioned_pkg: 'latest': docker-ce-selinux + '1.11': docker-engine-selinux-1.11.2-1.el7.centos + '1.12': docker-engine-selinux-1.12.6-1.el7.centos + '1.13': docker-engine-selinux-1.13.1-1.el7.centos '17.03': docker-ce-selinux-17.03.2.ce-1.el7.centos 'stable': docker-ce-selinux-17.03.2.ce-1.el7.centos 'edge': docker-ce-selinux-17.03.2.ce-1.el7.centos diff --git a/roles/docker/vars/ubuntu.yml b/roles/docker/vars/ubuntu.yml index 897c23ce0..f4d6b1e0f 100644 --- a/roles/docker/vars/ubuntu.yml +++ b/roles/docker/vars/ubuntu.yml @@ -4,10 +4,12 @@ docker_kernel_min_version: '3.10' # https://download.docker.com/linux/ubuntu/ docker_versioned_pkg: 'latest': docker-ce - '1.11': docker-engine=1.11.1-0~{{ ansible_distribution_release|lower }} + '1.11': docker-engine=1.11.2-0~{{ ansible_distribution_release|lower }} '1.12': docker-engine=1.12.6-0~ubuntu-{{ ansible_distribution_release|lower }} '1.13': docker-engine=1.13.1-0~ubuntu-{{ ansible_distribution_release|lower }} '17.03': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }} + 'stable': docker-ce=17.03.2~ce-0~ubuntu-{{ ansible_distribution_release|lower }} + 'edge': docker-ce=17.12.1~ce-0~ubuntu-{{ ansible_distribution_release|lower }} docker_package_info: pkg_mgr: apt @@ -28,3 +30,17 @@ docker_repo_info: deb {{ docker_ubuntu_repo_base_url }} {{ ansible_distribution_release|lower }} stable + +dockerproject_repo_key_info: + pkg_key: apt_key + url: '{{ dockerproject_apt_repo_gpgkey }}' + repo_keys: + - 58118E89F3A912897C070ADBF76221572C52609D + +dockerproject_repo_info: + pkg_repo: apt_repository + repos: + - > + deb {{ dockerproject_apt_repo_base_url }} + {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }} + main From ecec94ee7e25ff0e05a00b734664b5f85acf2f98 Mon Sep 17 00:00:00 2001 From: RongZhang Date: Wed, 7 Mar 2018 04:36:39 -0600 Subject: [PATCH 064/177] Fix Docker exits prematurely details:https://github.com/moby/moby/pull/31490/files --- roles/docker/templates/docker.service.j2 | 5 ++++- 1 file 
changed, 4 insertions(+), 1 deletion(-) diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index 29abb6d53..d8efe2025 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -31,7 +31,10 @@ LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity TimeoutStartSec=1min -Restart=on-abnormal +# restart the docker process if it exits prematurely +Restart=on-failure +StartLimitBurst=3 +StartLimitInterval=60s [Install] WantedBy=multi-user.target From 50e3ccfa2baed51522740ed3aa97e7f86f38bdae Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Mon, 12 Mar 2018 12:46:14 -0400 Subject: [PATCH 065/177] uses new kube_memory_reserved/kube_cpu_reserved variables in kubelt --- roles/kubernetes/node/templates/kubelet-container.j2 | 4 ++-- roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet-container.j2 b/roles/kubernetes/node/templates/kubelet-container.j2 index 4e8d4c371..22671b2c3 100644 --- a/roles/kubernetes/node/templates/kubelet-container.j2 +++ b/roles/kubernetes/node/templates/kubelet-container.j2 @@ -5,8 +5,8 @@ --privileged \ --name=kubelet \ --restart=on-failure:5 \ - --memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \ - --cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \ + --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \ + --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '') }} \ -v /dev:/dev:rw \ -v /etc/cni:/etc/cni:ro \ -v /opt/cni:/opt/cni:ro \ diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index c8cf40e7b..f67c72bf8 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -29,7 +29,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" --cadvisor-port={{ kube_cadvisor_port }} \ {# end kubeadm specific settings #} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \ ---kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \ +--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \ --node-status-update-frequency={{ kubelet_status_update_frequency }} \ --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \ --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \ From 9a4aa4288cc6bcbe4bc1601540c3f466e138dcb9 Mon Sep 17 00:00:00 2001 From: MQasimSarfraz Date: Mon, 12 Mar 2018 18:07:08 +0000 Subject: [PATCH 066/177] Fix vsphere cloud_provider RBAC permissions --- .../cluster_roles/tasks/main.yml | 27 ++++++++++++++ .../templates/vsphere-rbac.yml.j2 | 35 +++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 3f696a9fe..f9c5fc9b2 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -75,6 +75,33 @@ - node_webhook_crb_manifest.changed tags: node-webhook +- name: Write vsphere-cloud-provider ClusterRole manifest + template: + src: "vsphere-rbac.yml.j2" + dest: "{{ kube_config_dir }}/vsphere-rbac.yml" + register: vsphere_rbac_manifest + when: + - rbac_enabled + - 
cloud_provider is defined + - cloud_provider == 'vsphere' + - kube_version | version_compare('v1.9.0', '>=') + tags: vsphere + +- name: Apply vsphere-cloud-provider ClusterRole + kube: + name: "system:vsphere-cloud-provider" + kubectl: "{{bin_dir}}/kubectl" + resource: "clusterrolebinding" + filename: "{{ kube_config_dir }}/vsphere-rbac.yml" + state: latest + when: + - rbac_enabled + - cloud_provider is defined + - cloud_provider == 'vsphere' + - vsphere_rbac_manifest.changed + - kube_version | version_compare('v1.9.0', '>=') + tags: vsphere + # This is not a cluster role, but should be run after kubeconfig is set on master - name: Write kube system namespace manifest template: diff --git a/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 new file mode 100644 index 000000000..99da0462f --- /dev/null +++ b/roles/kubernetes-apps/cluster_roles/templates/vsphere-rbac.yml.j2 @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:vsphere-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:vsphere-cloud-provider +roleRef: + kind: ClusterRole + name: system:vsphere-cloud-provider + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: vsphere-cloud-provider + namespace: kube-system From 6abe78ff461fc5006458f2478fb3cb8fb4a6dae9 Mon Sep 17 00:00:00 2001 From: Cyril Jouve Date: Mon, 12 Mar 2018 19:59:22 +0100 Subject: [PATCH 067/177] use archive instead of command --- scripts/collect-info.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/collect-info.yaml b/scripts/collect-info.yaml index 1a0e2307b..14daf9d19 100644 --- a/scripts/collect-info.yaml +++ b/scripts/collect-info.yaml @@ -114,7 +114,12 @@ with_items: "{{logs}}" - name: Pack results and logs - local_action: raw GZIP=-9 tar --remove-files -cvzf {{dir|default(".")}}/logs.tar.gz -C /tmp collect-info + archive: + path: "/tmp/collect-info" + dest: "{{ dir|default('.') }}/logs.tar.gz" + remove: true + delegate_to: localhost + become: false run_once: true - name: Clean up collected command outputs From d264da8f080c4917e0f6646d81e96e5bbaf71338 Mon Sep 17 00:00:00 2001 From: "rong.zhang" Date: Tue, 13 Mar 2018 14:28:49 +0800 Subject: [PATCH 068/177] Fix yamllint roles error for #2188 commit --- roles/network_plugin/weave/tasks/main.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index dc0a032af..a8dfa0586 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -2,12 +2,11 @@ - import_tasks: seed.yml when: weave_mode_seed - - name: template weavenet conflist template: - src: weavenet.conflist.j2 - dest: /etc/cni/net.d/00-weave.conflist - owner: kube + src: weavenet.conflist.j2 + dest: /etc/cni/net.d/00-weave.conflist + owner: kube - name: Weave | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. 
/cnibindir/" From 2e0b33f75420bd7f82468ace18ed531cfe49ce8a Mon Sep 17 00:00:00 2001 From: "rong.zhang" Date: Tue, 13 Mar 2018 14:05:03 +0800 Subject: [PATCH 069/177] Add remove node to getting-started doc --- docs/getting-started.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/getting-started.md b/docs/getting-started.md index 961d1a9cf..26141050a 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -51,6 +51,18 @@ ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \ --private-key=~/.ssh/private_key ``` +Remove nodes +------------ + +You may want to remove **worker** nodes to your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then stop some kubernetes services and delete some certificates, and finally execute the kubectl command to delete these nodes. This can be combined with the add node function, This is generally helpful when doing something like autoscaling your clusters. Of course if a node is not working, you can remove the node and install it again. + +- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)). +- Run the ansible-playbook command, substituting `remove-node.yml`: +``` +ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \ + --private-key=~/.ssh/private_key +``` + Connecting to Kubernetes ------------------------ By default, Kubespray configures kube-master hosts with insecure access to From 39d247a2384339ed8969b9bd78634080583fded2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Tue, 13 Mar 2018 10:31:15 +0100 Subject: [PATCH 070/177] Add support to kubeadm too Explicitly defines the --kubelet-preferred-address-types parameter #2418 Fixes #2453 --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index ed1cc7add..e4657a601 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -37,6 +37,7 @@ apiServerExtraArgs: admission-control: {{ kube_apiserver_admission_control | join(',') }} apiserver-count: "{{ kube_apiserver_count }}" service-node-port-range: {{ kube_apiserver_node_port_range }} + kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" {% if kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv {% endif %} From f3788525ffcae3229bbd22881271f44fb7cba1aa Mon Sep 17 00:00:00 2001 From: Dann Bohn Date: Tue, 13 Mar 2018 06:15:48 -0400 Subject: [PATCH 071/177] fixes yamllint for docker defaults, and weave network plugin --- roles/docker/defaults/main.yml | 8 ++++---- roles/network_plugin/weave/tasks/main.yml | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index df7b97ab4..aa10371f5 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -21,16 +21,16 @@ docker_dns_servers_strict: yes docker_container_storage_setup: false -#CentOS/RedHat docker-ce repo +# CentOS/RedHat docker-ce repo docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable' docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg' -#Ubuntu docker-ce repo +# Ubuntu 
docker-ce repo docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu" docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg' -#Debian docker-ce repo +# Debian docker-ce repo docker_debian_repo_base_url: "https://download.docker.com/linux/debian" docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg' -#dockerproject repo +# dockerproject repo dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7' dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg' dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo' diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index dc0a032af..43cb81a1c 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -5,9 +5,9 @@ - name: template weavenet conflist template: - src: weavenet.conflist.j2 - dest: /etc/cni/net.d/00-weave.conflist - owner: kube + src: weavenet.conflist.j2 + dest: /etc/cni/net.d/00-weave.conflist + owner: kube - name: Weave | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/" From d1e6632e6ae02c6e9c2246cc3aff892c1c8d2b81 Mon Sep 17 00:00:00 2001 From: zhengchuan hu Date: Wed, 14 Mar 2018 17:18:55 +0800 Subject: [PATCH 072/177] Fix err in kubelet.kubeadm.env.j2 1. 404 link url 2. kubelet_authentication_token_webhook is not work 3. kube_reserved variable set twice --- roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index f67c72bf8..5be20d533 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -1,4 +1,4 @@ -### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/10-kubeadm.conf +### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/ ### All upstream values should be present in this file # logging to stderr means we get it in the systemd journal @@ -23,13 +23,14 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% if kubelet_authentication_token_webhook %} --authentication-token-webhook \ {% endif %} +{% if kubelet_authorization_mode_webhook %} --authorization-mode=Webhook \ +{% endif %} --client-ca-file={{ kube_cert_dir }}/ca.crt \ --pod-manifest-path={{ kube_manifest_dir }} \ --cadvisor-port={{ kube_cadvisor_port }} \ {# end kubeadm specific settings #} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \ ---kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \ --node-status-update-frequency={{ kubelet_status_update_frequency }} \ --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \ --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \ From f8fed0f3081954d469808f362ca961adfec9beab Mon Sep 17 00:00:00 2001 From: Sergey Bondarev Date: Wed, 14 Mar 2018 13:33:36 +0300 Subject: [PATCH 073/177] change expirations period for generated certificate from 10 years to 100 years --- roles/etcd/files/make-ssl-etcd.sh | 8 ++++---- 
From f8fed0f3081954d469808f362ca961adfec9beab Mon Sep 17 00:00:00 2001 From: Sergey Bondarev Date: Wed, 14 Mar 2018 13:33:36 +0300 Subject: [PATCH 073/177] change expiration period for generated certificates from 10 years to 100 years --- roles/etcd/files/make-ssl-etcd.sh | 8 ++++---- roles/kubernetes/secrets/files/make-ssl.sh | 4 ++-- roles/network_plugin/contiv/files/generate-certificate.sh | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/roles/etcd/files/make-ssl-etcd.sh b/roles/etcd/files/make-ssl-etcd.sh index 5544d6639..ebf0e2afa 100755 --- a/roles/etcd/files/make-ssl-etcd.sh +++ b/roles/etcd/files/make-ssl-etcd.sh @@ -65,7 +65,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then cp $SSLDIR/{ca.pem,ca-key.pem} . else openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1 - openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1 + openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1 fi # ETCD member @@ -75,12 +75,12 @@ if [ -n "$MASTERS" ]; then # Member key openssl genrsa -out member-${host}-key.pem 2048 > /dev/null 2>&1 openssl req -new -key member-${host}-key.pem -out member-${host}.csr -subj "/CN=etcd-member-${cn}" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in member-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 # Admin key openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1 openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=etcd-admin-${cn}" > /dev/null 2>&1 - openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 done fi @@ -90,7 +90,7 @@ if [ -n "$HOSTS" ]; then cn="${host%%.*}" openssl genrsa -out node-${host}-key.pem 2048 > /dev/null 2>&1 openssl req -new -key node-${host}-key.pem -out node-${host}.csr -subj "/CN=etcd-node-${cn}" > /dev/null 2>&1 - openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 3650 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in node-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out node-${host}.pem -days 36500 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1 done fi diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 750e9c4fe..724c6f369 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -69,7 +69,7 @@ if [ -e "$SSLDIR/ca-key.pem" ]; then cp $SSLDIR/{ca.pem,ca-key.pem} . else openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1 - openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 + openssl req -x509 -new -nodes -key ca-key.pem -days 36500 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 fi gen_key_and_cert() { @@ -77,7 +77,7 @@ gen_key_and_cert() { local subject=$2 openssl genrsa -out ${name}-key.pem 2048 > /dev/null 2>&1 openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1 - openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 3650 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 + openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days 36500 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 } # Admins diff --git a/roles/network_plugin/contiv/files/generate-certificate.sh b/roles/network_plugin/contiv/files/generate-certificate.sh index e794dbb69..0235b2664 100644 --- a/roles/network_plugin/contiv/files/generate-certificate.sh +++ b/roles/network_plugin/contiv/files/generate-certificate.sh @@ -17,7 +17,7 @@ rm -f $KEY_PATH rm -f $CERT_PATH openssl genrsa -out $KEY_PATH 2048 >/dev/null 2>&1 -openssl req -new -x509 -sha256 -days 3650 \ +openssl req -new -x509 -sha256 -days 36500 \ -key $KEY_PATH \ -out $CERT_PATH \ -subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"
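After the scripts are re-run, the new lifetime is easy to verify (a hedged check — the path below assumes kubespray's default etcd certificate location):

```sh
# A CA generated after this patch should report an expiry roughly 100 years out.
openssl x509 -in /etc/ssl/etcd/ssl/ca.pem -noout -enddate
```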
From 1bcc641daead5b79d9a6c2335712f3cffb241829 Mon Sep 17 00:00:00 2001 From: MQasimSarfraz Date: Wed, 14 Mar 2018 11:23:22 +0000 Subject: [PATCH 074/177] Create vsphere clusterrole only if it doesn't exist --- .../cluster_roles/tasks/main.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index f9c5fc9b2..5dbf49092 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -75,6 +75,18 @@ - node_webhook_crb_manifest.changed tags: node-webhook +- name: Check if vsphere-cloud-provider ClusterRole exists + command: "{{ bin_dir }}/kubectl get clusterroles system:vsphere-cloud-provider" + register: vsphere_cloud_provider + ignore_errors: true + when: + - rbac_enabled + - cloud_provider is defined + - cloud_provider == 'vsphere' + - kube_version | version_compare('v1.9.0', '>=') + - kube_version | version_compare('v1.9.3', '<=') + tags: vsphere + - name: Write vsphere-cloud-provider ClusterRole manifest template: src: "vsphere-rbac.yml.j2" @@ -84,7 +96,9 @@ - rbac_enabled - cloud_provider is defined - cloud_provider == 'vsphere' + - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') + - kube_version | version_compare('v1.9.3', '<=') tags: vsphere - name: Apply vsphere-cloud-provider ClusterRole @@ -98,8 +112,9 @@ - rbac_enabled - cloud_provider is defined - cloud_provider == 'vsphere' - - vsphere_rbac_manifest.changed + - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') + - kube_version | version_compare('v1.9.3', '<=') tags: vsphere # This is not a cluster role, but should be run after kubeconfig is set on master
From 788e41a315b36c86d60e68fa71f3493bb286c895 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Wed, 14 Mar 2018 19:23:43 +0100 Subject: [PATCH 075/177] Make sure output from extra args is strings Setting the following: ``` kube_kubeadm_controller_extra_args: address: 0.0.0.0 terminated-pod-gc-threshold: "100" ``` Results in `terminated-pod-gc-threshold: 100` in the kubeadm config file. But it has to be a string to work. --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index ed1cc7add..a8ffbbb17 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -59,7 +59,7 @@ apiServerExtraArgs: {% endif %} allow-privileged: "true" {% for key in kube_kubeadm_apiserver_extra_args %} - {{ key }}: {{ kube_kubeadm_apiserver_extra_args[key] }} + {{ key }}: {{ kube_kubeadm_apiserver_extra_args[key]|string }} {% endfor %} controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} @@ -69,12 +69,12 @@ controllerManagerExtraArgs: feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} {% for key in kube_kubeadm_controller_extra_args %} - {{ key }}: {{ kube_kubeadm_controller_extra_args[key] }} + {{ key }}: {{ kube_kubeadm_controller_extra_args[key]|string }} {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: {% for key in kube_kubeadm_scheduler_extra_args %} - {{ key }}: {{ kube_kubeadm_scheduler_extra_args[key] }} + {{ key }}: {{ kube_kubeadm_scheduler_extra_args[key]|string }} {% endfor %} {% endif %} apiServerCertSANs:
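The underlying issue is plain YAML typing: an unquoted numeric scalar parses as an integer, while kubeadm expects every extra-arg value to be a string. A minimal demonstration (assumes a Python interpreter with PyYAML installed; not part of the patch):

```sh
python3 - <<'EOF'
import yaml
print(type(yaml.safe_load('v: 100')['v']))    # <class 'int'>  -> rejected by kubeadm
print(type(yaml.safe_load('v: "100"')['v']))  # <class 'str'>  -> accepted
EOF
```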
From d843e3d562080681d4da7fe218211c8b829d37e6 Mon Sep 17 00:00:00 2001 From: Oleg Vyukov Date: Thu, 15 Mar 2018 22:18:18 +0300 Subject: [PATCH 076/177] Fix indentation of the custom ConfigMap for ingress-nginx (#2447) --- .../ingress_nginx/templates/ingress-nginx-cm.yml.j2 | 2 +- .../templates/ingress-nginx-tcp-servicecs-cm.yml.j2 | 2 +- .../templates/ingress-nginx-udp-servicecs-cm.yml.j2 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 index 79b9e17e7..7e47e81b1 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-cm.yml.j2 @@ -7,4 +7,4 @@ metadata: labels: k8s-app: ingress-nginx data: - {{ ingress_nginx_configmap | to_nice_yaml }} + {{ ingress_nginx_configmap | to_nice_yaml | indent(2) }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 index 5fb875940..0a87e91b7 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-tcp-servicecs-cm.yml.j2 @@ -7,4 +7,4 @@ metadata: labels: k8s-app: ingress-nginx data: - {{ ingress_nginx_configmap_tcp_services | to_nice_yaml }} + {{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }} diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 index bcb004bc9..d943e5718 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-udp-servicecs-cm.yml.j2 @@ -7,4 +7,4 @@ metadata: labels: k8s-app: ingress-nginx data: - {{ ingress_nginx_configmap_udp_services | to_nice_yaml }} + {{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }} From 3d6fd491795adb8a38493afe6c2968a46051d5ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Thu, 15 Mar 2018 20:20:05 +0100 Subject: [PATCH 077/177] Added option for encrypting secrets to etcd v.2 (#2428) * Added option for encrypting secrets to etcd * Fix keylength to 32 * Forgot the default * Rename secrets.yaml to secrets_encryption.yaml * Fix static path for secrets file to use ansible variable * Rename secrets.yaml.j2 to secrets_encryption.yaml.j2 * Base64 encode the token * Fixed merge error * Changed path to credentials dir * Update path to secrets file which is now readable inside the apiserver container. Set better file permissions * Add encryption option to k8s-cluster.yml --- inventory/sample/group_vars/k8s-cluster.yml | 5 ++++- roles/kubernetes/master/defaults/main.yml | 5 +++++ roles/kubernetes/master/tasks/encrypt-at-rest.yml | 10 ++++++++++ roles/kubernetes/master/tasks/main.yml | 3 +++ .../master/templates/kubeadm-config.yaml.j2 | 3 +++ .../templates/manifests/kube-apiserver.manifest.j2 | 3 +++ .../master/templates/secrets_encryption.yaml.j2 | 11 +++++++++++ 7 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 roles/kubernetes/master/tasks/encrypt-at-rest.yml create mode 100644 roles/kubernetes/master/templates/secrets_encryption.yaml.j2 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 128e8cc99..8f69afc25 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -111,7 +111,10 @@ kube_apiserver_insecure_port: 8080 # (http) # Kube-proxy proxyMode configuration. # Can be ipvs, iptables -kube_proxy_mode: iptables +kube_proxy_mode: iptables + +## Encrypting Secret Data at Rest (experimental) +kube_encrypt_secret_data: false # DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 59e528822..a1b506d4e 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -92,3 +92,8 @@ kube_kubeadm_scheduler_extra_args: {} ## Variable for influencing kube-scheduler behaviour volume_cross_zone_attachment: false + +## Encrypting Secret Data at Rest +kube_encrypt_secret_data: false +kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token length=32 chars=ascii_letters,digits') }}" +kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm diff --git a/roles/kubernetes/master/tasks/encrypt-at-rest.yml b/roles/kubernetes/master/tasks/encrypt-at-rest.yml new file mode 100644 index 000000000..2e569b08b --- /dev/null +++ b/roles/kubernetes/master/tasks/encrypt-at-rest.yml @@ -0,0 +1,10 @@ +--- +- name: Write secrets for encrypting secret data at rest + template: + src: secrets_encryption.yaml.j2 + dest: "{{ kube_config_dir }}/ssl/secrets_encryption.yaml" + owner: root + group: "{{ kube_cert_group }}" + mode: 0640 + tags: + - kube-apiserver diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 04ad307fd..daa10fd79 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -12,6 +12,9 @@ - import_tasks: users-file.yml when: kube_basic_auth|default(true) +- import_tasks: encrypt-at-rest.yml + when: kube_encrypt_secret_data + - name: Compare host kubectl with hyperkube container command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/cmp /hyperkube /systembindir/kubectl" register: kubectl_task_compare_result diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index ed1cc7add..cd266ed3d 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -52,6 +52,9 @@ apiServerExtraArgs: {% if kube_oidc_groups_claim is defined %} oidc-groups-claim: {{ kube_oidc_groups_claim }} {% endif %} +{% endif %} +{% if kube_encrypt_secret_data %} + experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml {% endif %} storage-backend: {{ kube_apiserver_storage_backend }} {% if kube_api_runtime_config is defined %} diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 0dbe93cab..c1685410d 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -103,6 +103,9 @@ spec: {% if authorization_modes %} - --authorization-mode={{ authorization_modes|join(',') }} {% endif %} +{% if kube_encrypt_secret_data %} + - --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml +{% endif %} {% if kube_feature_gates %} - --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} diff --git a/roles/kubernetes/master/templates/secrets_encryption.yaml.j2 b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2 new file mode 100644 index 000000000..84c6a4ea8 --- /dev/null +++ b/roles/kubernetes/master/templates/secrets_encryption.yaml.j2 @@ -0,0 +1,11 @@ +kind: 
EncryptionConfig +apiVersion: v1 +resources: + - resources: + - secrets + providers: + - {{ kube_encryption_algorithm }}: + keys: + - name: key + secret: {{ kube_encrypt_token | b64encode }} + - identity: {} From 40c0f3756bbabbbf8b9f05eaf3e86bce600a7e11 Mon Sep 17 00:00:00 2001 From: woopstar Date: Thu, 15 Mar 2018 20:27:19 +0100 Subject: [PATCH 078/177] Encapsulate item instead of casting to string --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index a8ffbbb17..bbe329b5f 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -59,7 +59,7 @@ apiServerExtraArgs: {% endif %} allow-privileged: "true" {% for key in kube_kubeadm_apiserver_extra_args %} - {{ key }}: {{ kube_kubeadm_apiserver_extra_args[key]|string }} + {{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}" {% endfor %} controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} @@ -69,12 +69,12 @@ controllerManagerExtraArgs: feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} {% for key in kube_kubeadm_controller_extra_args %} - {{ key }}: {{ kube_kubeadm_controller_extra_args[key]|string }} + {{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}" {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: {% for key in kube_kubeadm_scheduler_extra_args %} - {{ key }}: {{ kube_kubeadm_scheduler_extra_args[key]|string }} + {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} {% endif %} apiServerCertSANs: From 1a35948ff67369d856b08d8ccb747374330ab82d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Thu, 15 Mar 2018 20:33:57 +0100 Subject: [PATCH 079/177] Enable encrypting the secrets Enable the CI test to check the encryption of secrets --- tests/files/gce_centos7-flannel-addons.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index 0e4346f67..8ac8a901b 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -15,3 +15,4 @@ etcd_deployment_type: host deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce +kube_encrypt_secret_data: true
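With `kube_encrypt_secret_data` enabled, encryption at rest can be verified by reading a secret straight out of etcd (a hedged sketch — the etcdctl endpoint and certificate flags depend on the deployment and are omitted; the secret name is illustrative):

```sh
# An encrypted value is stored with a provider prefix instead of plaintext.
ETCDCTL_API=3 etcdctl get /registry/secrets/default/test-secret | head -c 64
# expect output containing "k8s:enc:aescbc:v1:key" rather than the raw value
```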
From b7e6dd0dd49cec06ef36d8982eb2ecf42a980b8a Mon Sep 17 00:00:00 2001 From: Sergey Bondarev Date: Fri, 16 Mar 2018 21:44:36 +0300 Subject: [PATCH 080/177] Add --iface-regex option to flannel Flannel uses the interface set via the --iface option for inter-host communication, defaulting to the interface for the default route on the machine. The flannel config is set via a DaemonSet, so the config is the same on all nodes, but different nodes can have different interface names for the inter-host communication network. The --iface-regex option allows flannel to find the interface that carries an address from the inter-host communication network. --- roles/network_plugin/flannel/defaults/main.yml | 8 +++++++- roles/network_plugin/flannel/templates/cni-flannel.yml.j2 | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index 08f4ac145..e48a9475a 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -5,9 +5,15 @@ # flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" ## interface that should be used for flannel operations -## This is actually an inventory node-level item +## This is actually an inventory cluster-level item # flannel_interface: +## Select interface that should be used for flannel operations by regexp on Name or IP +## This is actually an inventory cluster-level item +## example: select interface with ip from net 10.0.0.0/23 +## single quote and escape backslashes +# flannel_interface_regexp: '10\\.0\\.[0-2]\\.\\d{1,3}' + # You can choose what type of flannel backend to use # please refer to flannel's docs : https://github.com/coreos/flannel/blob/master/README.md flannel_backend_type: "vxlan" diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index 6c69dcaa8..bb2a6a7f8 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -66,7 +66,7 @@ spec: requests: cpu: {{ flannel_cpu_requests }} memory: {{ flannel_memory_requests }} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ] + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %}{% if flannel_interface_regexp is defined %}, "--iface-regex={{ flannel_interface_regexp }}"{% endif %} ] securityContext: privileged: true env:
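When picking a value for `flannel_interface_regexp`, it helps to list each node's interface names and addresses first (a small helper, not part of the patch — the sample output is illustrative):

```sh
# Print "name address" pairs; choose a regexp matching the inter-host network.
ip -o -4 addr show | awk '{print $2, $4}'
# e.g. "eth1 10.0.1.15/23" is matched by the sample regexp above,
# '10\\.0\\.[0-2]\\.\\d{1,3}' (backslashes escaped for the inventory).
```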
From e40368ae2bede83978523cb1d4c04866c322468e Mon Sep 17 00:00:00 2001 From: woopstar Date: Tue, 13 Mar 2018 12:00:05 +0100 Subject: [PATCH 081/177] Add CoreDNS support with various fixes Added CoreDNS to downloads Updated with labels. Should now work without RBAC too Fix DNS settings on hosts Rename CoreDNS service from kube-dns to coredns Add rotate based on http://edgeofsanity.net/rant/2017/12/20/systemd-resolved-is-broken.html Updated docs with CoreDNS info Added labels and fixed minor settings from official yaml file: https://github.com/kubernetes/kubernetes/blob/release-1.9/cluster/addons/dns/coredns.yaml.sed Added a secondary deployment and secondary service IP. This is to mitigate DNS timeouts and create high resiliency against failures. See discussion at 'https://github.com/coreos/coreos-kubernetes/issues/641#issuecomment-281174806' Set DNS list correctly. Thanks to @whereismyjetpack Only download KubeDNS or CoreDNS if selected Move DNS cleanup to its own file and import tasks based on dns mode Fix install of KubeDNS when dnsmasq_kubedns mode is selected Add new dns option coredns_dual for dual-stack deployment. Added variable to configure replicas deployed. Updated docs for dual-stack deployment. Removed rotate option in resolv.conf. Run DNS manifests for CoreDNS and KubeDNS Set skydns servers on dual-stack deployment Use only one template for CoreDNS dual deployment Set correct cluster IP for the DNS server --- docs/dns-stack.md | 8 ++ docs/vars.md | 7 +- inventory/sample/group_vars/k8s-cluster.yml | 8 +- roles/docker/tasks/set_facts_dns.yml | 6 +- roles/download/defaults/main.yml | 17 +++- .../kubernetes-apps/ansible/defaults/main.yml | 3 + .../ansible/tasks/cleanup_dns.yml | 54 +++++++++++++ .../kubernetes-apps/ansible/tasks/coredns.yml | 39 +++++++++ .../kubernetes-apps/ansible/tasks/kubedns.yml | 41 ++++++++++ roles/kubernetes-apps/ansible/tasks/main.yml | 69 ++++------------ .../templates/coredns-clusterrole.yml.j2 | 19 +++++ .../coredns-clusterrolebinding.yml.j2 | 18 +++++ .../ansible/templates/coredns-config.yml.j2 | 22 +++++ .../templates/coredns-deployment.yml.j2 | 81 +++++++++++++++++++ .../ansible/templates/coredns-sa.yml.j2 | 9 +++ .../ansible/templates/coredns-svc.yml.j2 | 22 +++++ .../node/templates/kubelet.kubeadm.env.j2 | 4 +- .../node/templates/kubelet.standard.env.j2 | 4 +- .../preinstall/tasks/set_resolv_facts.yml | 4 +- roles/kubespray-defaults/defaults/main.yaml | 1 + 20 files changed, 369 insertions(+), 67 deletions(-) create mode 100644 roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml create mode 100644 roles/kubernetes-apps/ansible/tasks/coredns.yml create mode 100644 roles/kubernetes-apps/ansible/tasks/kubedns.yml create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 create mode 100644 roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 diff --git a/docs/dns-stack.md b/docs/dns-stack.md index 6215114af..1deb88776 100644 --- a/docs/dns-stack.md +++ b/docs/dns-stack.md @@ -62,6 +62,14 @@ other queries are forwardet to the nameservers found in ``upstream_dns_servers`` This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for all queries. +#### coredns +This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for +all queries. + +#### coredns_dual +This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for +all queries. It will also deploy a secondary CoreDNS stack + #### manual This does not install dnsmasq or kubedns, but allows you to specify `manual_dns_server`, which will be configured on nodes for handling Pod DNS. diff --git a/docs/vars.md b/docs/vars.md index 3303f6bcb..f612b4f52 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -63,7 +63,8 @@ following default cluster paramters: bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
* *dns_setup* - Enables dnsmasq * *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2) -* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3) +* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3) +* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4) * *cloud_provider* - Enable extra Kubelet option if operating inside GCE or OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in @@ -105,9 +106,9 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st * *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames that correspond to each node. -* *kubelet_deployment_type* - Controls which platform to deploy kubelet on. +* *kubelet_deployment_type* - Controls which platform to deploy kubelet on. Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode - is unlikely to work on newer releases. Starting with Kubernetes v1.7 + is unlikely to work on newer releases. Starting with Kubernetes v1.7 series, this now defaults to ``host``. Before v1.7, the default was Docker. This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704). * *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example, diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 128e8cc99..df5d3513d 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -111,14 +111,17 @@ kube_apiserver_insecure_port: 8080 # (http) # Kube-proxy proxyMode configuration. # Can be ipvs, iptables -kube_proxy_mode: iptables +kube_proxy_mode: iptables + +## Encrypting Secret Data at Rest (experimental) +kube_encrypt_secret_data: false # DNS configuration. 
# Kubernetes cluster name, also will be used as DNS domain cluster_name: cluster.local # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods ndots: 2 -# Can be dnsmasq_kubedns, kubedns, manual or none +# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none dns_mode: kubedns # Set manual server if using a custom cluster DNS server #manual_dns_server: 10.x.x.x @@ -129,6 +132,7 @@ resolvconf_mode: docker_dns deploy_netchecker: false # Ip address of the kubernetes skydns service skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}" dns_domain: "{{ cluster_name }}" diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml index 7152b442b..6fe516c2d 100644 --- a/roles/docker/tasks/set_facts_dns.yml +++ b/roles/docker/tasks/set_facts_dns.yml @@ -3,8 +3,10 @@ - name: set dns server for docker set_fact: docker_dns_servers: |- - {%- if dns_mode == 'kubedns' -%} + {%- if dns_mode in ['kubedns', 'coredns'] -%} {{ [ skydns_server ] }} + {%- elif dns_mode == 'coredns_dual' -%} + {{ [ skydns_server ] + [ skydns_server_secondary ] }} {%- elif dns_mode == 'dnsmasq_kubedns' -%} {{ [ dnsmasq_dns_server ] }} {%- elif dns_mode == 'manual' -%} @@ -24,7 +26,7 @@ - name: add upstream dns servers (only when dnsmasq is not used) set_fact: docker_dns_servers: "{{ docker_dns_servers + upstream_dns_servers|default([]) }}" - when: dns_mode == 'kubedns' + when: dns_mode in ['kubedns', 'coredns', 'coredns_dual'] - name: add global searchdomains set_fact: diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index d87f4b923..b43a5aa0b 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -100,6 +100,9 @@ dnsmasq_image_tag: "{{ dnsmasq_version }}" kubedns_version: 1.14.8 kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64" kubedns_image_tag: "{{ kubedns_version }}" +coredns_version: 1.1.0 +coredns_image_repo: "docker.io/coredns/coredns" +coredns_image_tag: "{{ coredns_version }}" dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64" dnsmasq_nanny_image_tag: "{{ kubedns_version }}" dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-amd64" @@ -274,25 +277,31 @@ downloads: tag: "{{ dnsmasq_image_tag }}" sha256: "{{ dnsmasq_digest_checksum|default(None) }}" kubedns: - enabled: true + enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubedns_image_repo }}" tag: "{{ kubedns_image_tag }}" sha256: "{{ kubedns_digest_checksum|default(None) }}" + coredns: + enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" + container: true + repo: "{{ coredns_image_repo }}" + tag: "{{ coredns_image_tag }}" + sha256: "{{ coredns_digest_checksum|default(None) }}" dnsmasq_nanny: - enabled: true + enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_nanny_image_repo }}" tag: "{{ dnsmasq_nanny_image_tag }}" sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}" dnsmasq_sidecar: - enabled: true + enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_sidecar_image_repo }}" tag: "{{ dnsmasq_sidecar_image_tag }}" sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}" kubednsautoscaler: - enabled: true +
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubednsautoscaler_image_repo }}" tag: "{{ kubednsautoscaler_image_tag }}" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 350f663a1..4dc4be212 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -10,6 +10,9 @@ dns_memory_requests: 70Mi kubedns_min_replicas: 2 kubedns_nodes_per_replica: 10 +# CoreDNS +coredns_replicas: 2 + # Images kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-amd64" kubedns_image_tag: "{{ kubedns_version }}" diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml new file mode 100644 index 000000000..5f8356cf9 --- /dev/null +++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -0,0 +1,54 @@ +--- +- name: Kubernetes Apps | Delete old CoreDNS resources + kube: + name: "coredns" + namespace: "{{ system_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item }}" + state: absent + with_items: + - 'deploy' + - 'configmap' + - 'svc' + tags: + - upgrade + +- name: Kubernetes Apps | Delete kubeadm CoreDNS + kube: + name: "coredns" + namespace: "{{ system_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "deploy" + state: absent + when: + - kubeadm_enabled|default(false) + - kubeadm_init.changed|default(false) + - inventory_hostname == groups['kube-master'][0] + +- name: Kubernetes Apps | Delete old KubeDNS resources + kube: + name: "kube-dns" + namespace: "{{ system_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item }}" + state: absent + with_items: + - 'deploy' + - 'svc' + tags: + - upgrade + +- name: Kubernetes Apps | Delete kubeadm KubeDNS + kube: + name: "kube-dns" + namespace: "{{ system_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item }}" + state: absent + with_items: + - 'deploy' + - 'svc' + when: + - kubeadm_enabled|default(false) + - kubeadm_init.changed|default(false) + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/coredns.yml b/roles/kubernetes-apps/ansible/tasks/coredns.yml new file mode 100644 index 000000000..fcd6c4c6d --- /dev/null +++ b/roles/kubernetes-apps/ansible/tasks/coredns.yml @@ -0,0 +1,39 @@ +--- +- name: Kubernetes Apps | Lay Down CoreDNS Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + with_items: + - { name: coredns, file: coredns-config.yml, type: configmap } + - { name: coredns, file: coredns-sa.yml, type: sa } + - { name: coredns, file: coredns-deployment.yml, type: deployment } + - { name: coredns, file: coredns-svc.yml, type: svc } + - { name: coredns, file: coredns-clusterrole.yml, type: clusterrole } + - { name: coredns, file: coredns-clusterrolebinding.yml, type: clusterrolebinding } + register: coredns_manifests + vars: + clusterIP: "{{ skydns_server }}" + when: + - dns_mode in ['coredns', 'coredns_dual'] + - inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in rbac_resources + tags: + - coredns + +- name: Kubernetes Apps | Lay Down Secondary CoreDNS Template + template: + src: "{{ item.src }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + with_items: + - { name: coredns, src: coredns-deployment.yml, file: coredns-deployment-secondary.yml, type: deployment } + - { name: coredns, src: coredns-svc.yml, file: coredns-svc-secondary.yml, type: svc 
} + register: coredns_secondary_manifests + vars: + clusterIP: "{{ skydns_server_secondary }}" + coredns_ordinal_suffix: "-secondary" + when: + - dns_mode == 'coredns_dual' + - inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in rbac_resources + tags: + - coredns diff --git a/roles/kubernetes-apps/ansible/tasks/kubedns.yml b/roles/kubernetes-apps/ansible/tasks/kubedns.yml new file mode 100644 index 000000000..c4c34ecf8 --- /dev/null +++ b/roles/kubernetes-apps/ansible/tasks/kubedns.yml @@ -0,0 +1,41 @@ +--- + +- name: Kubernetes Apps | Lay Down KubeDNS Template + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/{{ item.file }}" + with_items: + - { name: kube-dns, file: kubedns-sa.yml, type: sa } + - { name: kube-dns, file: kubedns-deploy.yml, type: deployment } + - { name: kube-dns, file: kubedns-svc.yml, type: svc } + - { name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa } + - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole } + - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding } + - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment } + register: kubedns_manifests + when: + - dns_mode in ['kubedns','dnsmasq_kubedns'] + - inventory_hostname == groups['kube-master'][0] + - rbac_enabled or item.type not in rbac_resources + tags: + - dnsmasq + +# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns +- name: Kubernetes Apps | Patch system:kube-dns ClusterRole + command: > + {{ bin_dir }}/kubectl patch clusterrole system:kube-dns + --patch='{ + "rules": [ + { + "apiGroups" : [""], + "resources" : ["endpoints", "services"], + "verbs": ["list", "watch", "get"] + } + ] + }' + when: + - dns_mode in ['kubedns', 'dnsmasq_kubedns'] + - inventory_hostname == groups['kube-master'][0] + - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True) + tags: + - dnsmasq diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index a25d595eb..55d417982 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -11,66 +11,26 @@ delay: 2 when: inventory_hostname == groups['kube-master'][0] -- name: Kubernetes Apps | Delete old kubedns resources - kube: - name: "kubedns" - namespace: "{{ system_namespace }}" - kubectl: "{{ bin_dir }}/kubectl" - resource: "{{ item }}" - state: absent - with_items: - - 'deploy' - - 'svc' +- name: Kubernetes Apps | Cleanup DNS + import_tasks: tasks/cleanup_dns.yml + when: + - inventory_hostname == groups['kube-master'][0] tags: - upgrade -- name: Kubernetes Apps | Delete kubeadm kubedns - kube: - name: "kubedns" - namespace: "{{ system_namespace }}" - kubectl: "{{ bin_dir }}/kubectl" - resource: "deploy" - state: absent +- name: Kubernetes Apps | CoreDNS + import_tasks: "tasks/coredns.yml" when: - - kubeadm_enabled|default(false) - - kubeadm_init.changed|default(false) + - dns_mode in ['coredns', 'coredns_dual'] - inventory_hostname == groups['kube-master'][0] - -- name: Kubernetes Apps | Lay Down KubeDNS Template - template: - src: "{{ item.file }}.j2" - dest: "{{ kube_config_dir }}/{{ item.file }}" - with_items: - - { name: kube-dns, file: kubedns-sa.yml, type: sa } - - { name: kube-dns, file: kubedns-deploy.yml, type: deployment } - - { name: kube-dns, file: kubedns-svc.yml, type: svc } - - { name: kubedns-autoscaler, file: 
kubedns-autoscaler-sa.yml, type: sa } - - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole } - - { name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding } - - { name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment } - register: manifests - when: - - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] - - rbac_enabled or item.type not in rbac_resources tags: - - dnsmasq + - coredns -# see https://github.com/kubernetes/kubernetes/issues/45084, only needed for "old" kube-dns -- name: Kubernetes Apps | Patch system:kube-dns ClusterRole - command: > - {{ bin_dir }}/kubectl patch clusterrole system:kube-dns - --patch='{ - "rules": [ - { - "apiGroups" : [""], - "resources" : ["endpoints", "services"], - "verbs": ["list", "watch", "get"] - } - ] - }' +- name: Kubernetes Apps | KubeDNS + import_tasks: "tasks/kubedns.yml" when: - - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] - - rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True) + - dns_mode in ['kubedns', 'dnsmasq_kubedns'] + - inventory_hostname == groups['kube-master'][0] tags: - dnsmasq @@ -82,7 +42,10 @@ resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/{{ item.item.file }}" state: "latest" - with_items: "{{ manifests.results }}" + with_items: + - "{{ kubedns_manifests.results | default({}) }}" + - "{{ coredns_manifests.results | default({}) }}" + - "{{ coredns_secondary_manifests.results | default({}) }}" when: - dns_mode != 'none' - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 new file mode 100644 index 000000000..4136d603e --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrole.yml.j2 @@ -0,0 +1,19 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: Reconcile + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..6c49d047f --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + addonmanager.kubernetes.io/mode: EnsureExists + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 new file mode 100644 index 000000000..983d2579f --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: {{ system_namespace }} + labels: + addonmanager.kubernetes.io/mode: EnsureExists +data: + Corefile: | + .:53 { + errors + health + 
kubernetes {{ cluster_name }} in-addr.arpa ip6.arpa { + pods insecure + upstream /etc/resolv.conf + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + proxy . /etc/resolv.conf + cache 30 + } diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 new file mode 100644 index 000000000..30128d566 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 @@ -0,0 +1,81 @@ +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: coredns{{ coredns_ordinal_suffix | default('') }} + namespace: {{ system_namespace }} + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "CoreDNS" +spec: + replicas: {{ coredns_replicas }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 0 + maxSurge: 10% + selector: + matchLabels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + template: + metadata: + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: +{% if rbac_enabled %} + serviceAccountName: coredns +{% endif %} + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "CriticalAddonsOnly" + operator: "Exists" + containers: + - name: coredns + image: "{{ coredns_image_repo }}:{{ coredns_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + # TODO: Set memory limits when we've profiled the container for large + # clusters, then set request = limit to keep this container in + # guaranteed class. Currently, this container falls into the + # "burstable" category so the kubelet doesn't backoff from restarting it. 
+ limits: + memory: {{ dns_memory_limit }} + requests: + cpu: {{ dns_cpu_requests }} + memory: {{ dns_memory_requests }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 new file mode 100644 index 000000000..db5682354 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 new file mode 100644 index 000000000..c5b76b0b5 --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 @@ -0,0 +1,22 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: coredns{{ coredns_ordinal_suffix | default('') }} + namespace: {{ system_namespace }} + labels: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "CoreDNS" +spec: + selector: + k8s-app: coredns{{ coredns_ordinal_suffix | default('') }} + clusterIP: {{ clusterIP }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 index c8cf40e7b..28467a501 100644 --- a/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.kubeadm.env.j2 @@ -50,8 +50,10 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {# DNS settings for kubelet #} -{% if dns_mode == 'kubedns' %} +{% if dns_mode in ['kubedns', 'coredns'] %} {% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %} {% elif dns_mode == 'dnsmasq_kubedns' %} {% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %} {% elif dns_mode == 'manual' %} diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 8e05e0253..d33adfba7 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -42,8 +42,10 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" --enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %} {# DNS settings for kubelet #} -{% if dns_mode == 'kubedns' %} +{% if dns_mode in ['kubedns', 'coredns'] %} {% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %} +{% elif dns_mode == 'coredns_dual' %} +{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %} {% elif dns_mode == 'dnsmasq_kubedns' %} {% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %} {% elif dns_mode == 'manual' %} diff --git a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml index fdc46125e..eb8f3f43f 100644 --- a/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_resolv_facts.yml @@ -93,8 +93,10 @@ - name: pick dnsmasq cluster IP or default resolver set_fact: dnsmasq_server: |- - {%- if dns_mode == 'kubedns' and not dns_early|bool -%} + {%- if dns_mode in ['kubedns', 'coredns'] and not dns_early|bool -%} {{ [ skydns_server ] + upstream_dns_servers|default([]) }} + {%- elif dns_mode == 'coredns_dual' and not dns_early|bool -%} + {{ [ skydns_server ] + [ skydns_server_secondary ] + upstream_dns_servers|default([]) }} {%- elif dns_mode == 'manual' and not dns_early|bool -%} {{ [ manual_dns_server ] + upstream_dns_servers|default([]) }} {%- elif dns_early|bool -%} diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 61f11e97f..0b10adf62 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -49,6 +49,7 @@ resolvconf_mode: docker_dns deploy_netchecker: false # Ip address of the kubernetes skydns service skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}" dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}" dns_domain: "{{ cluster_name }}"
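Once CoreDNS is running, a quick in-cluster resolution test against the new service IP is worthwhile (a hedged smoke test — 10.233.0.3 assumes the default kube_service_addresses; any busybox image will do):

```sh
kubectl run -it --rm dnstest --image=busybox --restart=Never -- \
  nslookup kubernetes.default.svc.cluster.local 10.233.0.3
```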
From 728598b230b9e90fe6db4e7476c32a4bb65cb720 Mon Sep 17 00:00:00 2001 From: MQasimSarfraz Date: Fri, 16 Mar 2018 20:33:29 +0000 Subject: [PATCH 082/177] Mark "calico-rr" as optional in fact gathering --- upgrade-cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 88969436b..7acec3083 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -26,7 +26,7 @@ setup: delegate_to: "{{item}}" delegate_facts: True - with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}" + with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}" - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
From 1481f7d64b7cdaa7d269cc71b1bef6c442ec0b02 Mon Sep 17 00:00:00 2001 From: Sergey Bondarev Date: Sat, 17 Mar 2018 02:54:46 +0300 Subject: [PATCH 083/177] Dedicated node for ingress nginx controller Adds the ability to create a dedicated node for the ingress-nginx controller, adds host-type networking for the nginx controller, and adds terminationGracePeriodSeconds: 60 from the example at https://github.com/kubernetes/ingress-nginx/blob/master/docs/examples/static-ip/nginx-ingress-controller.yaml --- inventory/sample/group_vars/k8s-cluster.yml | 1 + inventory/sample/hosts.ini | 5 +++++ .../templates/ingress-nginx-controller-ds.yml.j2 | 8 ++++++++ roles/kubernetes/node/templates/kubelet.standard.env.j2 | 2 ++ 4 files changed, 16 insertions(+) diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 128e8cc99..19ffc8cca 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -189,6 +189,7 @@ cephfs_provisioner_enabled: false # Nginx ingress controller deployment ingress_nginx_enabled: false +# ingres_nginx_host_network: true # ingress_nginx_namespace: "ingress-nginx" # ingress_nginx_insecure_port: 80 # ingress_nginx_secure_port: 443 diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini index 13cc3612e..8eece0d48 100644 --- a/inventory/sample/hosts.ini +++ b/inventory/sample/hosts.ini @@ -26,6 +26,11 @@ # node5 # node6 +# optional for dedicated ingress node +# [kube-ingress] +# node2 +# node3 + # [k8s-cluster:children] # kube-node # kube-master diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 index 7fd3a946c..289119a60 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -21,6 +21,14 @@ spec: k8s-app: ingress-nginx version: v{{ ingress_nginx_controller_image_tag }} spec: +{% if ingres_nginx_host_network is defined and ingres_nginx_host_network %} + hostNetwork: true +{% endif %} +{% if 'kube-ingress' in groups %} + nodeSelector: + node-role.kubernetes.io/ingress: "true" +{% endif %} + terminationGracePeriodSeconds: 60 containers: - name: ingress-nginx-controller image: {{ ingress_nginx_controller_image_repo }}:{{ ingress_nginx_controller_image_tag }} diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 8e05e0253..ed06cf72d 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -84,6 +84,8 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% if not standalone_kubelet|bool %} {% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %} {% endif %} +{% elif inventory_hostname in groups['kube-ingress']|default([]) %} +{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %} {% else %} {% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} {% endif %}
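After hosts are added to the `kube-ingress` group and the cluster is redeployed, placement can be checked as follows (a hedged sketch — the namespace assumes the default ingress_nginx_namespace):

```sh
# Controller pods should land only on nodes carrying the ingress role label.
kubectl get nodes -l node-role.kubernetes.io/ingress=true
kubectl -n ingress-nginx get pods -o wide
```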
From b9a949820a0d8269054f95b05187ef8366ece624 Mon Sep 17 00:00:00 2001 From: woopstar Date: Sun, 18 Mar 2018 08:42:36 +0100 Subject: [PATCH 084/177] Only copy tokens if tokens_list contains any --- roles/kubernetes/secrets/tasks/gen_tokens.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/tasks/gen_tokens.yml b/roles/kubernetes/secrets/tasks/gen_tokens.yml index a4cc0f69b..df47d157d 100644 --- a/roles/kubernetes/secrets/tasks/gen_tokens.yml +++ b/roles/kubernetes/secrets/tasks/gen_tokens.yml @@ -55,4 +55,4 @@ - name: Gen_tokens | Copy tokens on masters shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /" when: inventory_hostname in groups['kube-master'] and sync_tokens|default(false) and - inventory_hostname != groups['kube-master'][0] + inventory_hostname != groups['kube-master'][0] and tokens_data.stdout != '' From f1d2f840434ce50de1e04946e86c4a34b62d4621 Mon Sep 17 00:00:00 2001 From: woopstar Date: Sun, 18 Mar 2018 16:15:00 +0100 Subject: [PATCH 085/177] Only apply roles from first master node to fix regression --- roles/kubernetes-apps/cluster_roles/tasks/main.yml | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml index 5dbf49092..c576586a2 100644 --- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml +++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml @@ -16,7 +16,9 @@ src: "node-crb.yml.j2" dest: "{{ kube_config_dir }}/node-crb.yml" register: node_crb_manifest - when: rbac_enabled + when: + - rbac_enabled + - inventory_hostname == groups['kube-master'][0] - name: Apply workaround to allow all nodes with cert O=system:nodes to register kube: @@ -28,6 +30,7 @@ when: - rbac_enabled - node_crb_manifest.changed + - inventory_hostname == groups['kube-master'][0] - name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet template: @@ -37,6 +40,7 @@ when: - rbac_enabled - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube-master'][0] tags: node-webhook - name: Apply webhook ClusterRole @@ -50,6 +54,7 @@ - rbac_enabled - kubelet_authorization_mode_webhook - node_webhook_cr_manifest.changed + - inventory_hostname == groups['kube-master'][0] tags: node-webhook - name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole @@ -60,6 +65,7 @@ when: - rbac_enabled - kubelet_authorization_mode_webhook + - inventory_hostname == groups['kube-master'][0] tags: node-webhook - name: Grant system:nodes the webhook ClusterRole @@ -73,6 +79,7 @@ - rbac_enabled - kubelet_authorization_mode_webhook - node_webhook_crb_manifest.changed + - inventory_hostname == groups['kube-master'][0] tags: node-webhook - name: Check if vsphere-cloud-provider ClusterRole exists @@ -85,6 +92,7 @@ - cloud_provider == 'vsphere' - kube_version | version_compare('v1.9.0', '>=') - kube_version | version_compare('v1.9.3', '<=') + - inventory_hostname == groups['kube-master'][0] tags: vsphere - name: Write vsphere-cloud-provider ClusterRole manifest @@ -99,6 +107,7 @@ - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') - kube_version | version_compare('v1.9.3', '<=') + - inventory_hostname == groups['kube-master'][0] tags: vsphere - name: Apply vsphere-cloud-provider ClusterRole @@ -115,6 +124,7 @@ - vsphere_cloud_provider.rc != 0 - kube_version | version_compare('v1.9.0', '>=') - kube_version | version_compare('v1.9.3', '<=') + - inventory_hostname == groups['kube-master'][0] tags: vsphere # This is not a cluster role, but should be run after kubeconfig is set on master From 4ee9cb2be9053799af9aefe3036fe9e66d95ab30 Mon Sep 17 00:00:00 2001 From: Chen Hong Date: Mon, 19 Mar 2018 14:32:25 +0800 Subject: [PATCH 086/177] gather facts from all instances, maybe include calico-rr --- cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster.yml b/cluster.yml index 00c68a593..6dcb8b659 100644 --- a/cluster.yml +++ b/cluster.yml @@ -26,7 +26,7 @@ setup: delegate_to: "{{item}}" delegate_facts: True - with_items: "{{ groups['k8s-cluster'] + groups['etcd'] }}" + with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}" - hosts: k8s-cluster:etcd:calico-rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" From 73cd24bf5af15ad55cc7d232fa0efede8f7a5000 Mon Sep 17 00:00:00 2001 From: Chen Hong Date: Mon, 19 Mar 2018 14:35:45 +0800 Subject: [PATCH 087/177] gather facts from all instances, maybe include calico-rr --- cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster.yml b/cluster.yml index 6dcb8b659..fb7dec4cb 100644 ---
a/cluster.yml
+++ b/cluster.yml
@@ -26,7 +26,7 @@
       setup:
       delegate_to: "{{item}}"
       delegate_facts: True
-      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr'] }}"
+      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
 
 - hosts: k8s-cluster:etcd:calico-rr
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

From 038da7255fc1dad26102ce8b7ce77cbf1ab3f77b Mon Sep 17 00:00:00 2001
From: Sergey Bondarev
Date: Mon, 19 Mar 2018 12:59:38 +0300
Subject: [PATCH 088/177] check if group kube-ingress is not empty

fix spelling mistake in ingress_nginx_host_network
set default value for ingress_nginx_host_network: false
---
 inventory/sample/group_vars/k8s-cluster.yml            | 2 +-
 .../ingress_controller/ingress_nginx/defaults/main.yml | 1 +
 .../templates/ingress-nginx-controller-ds.yml.j2       | 4 ++--
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 19ffc8cca..1038fe376 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -189,7 +189,7 @@ cephfs_provisioner_enabled: false
 
 # Nginx ingress controller deployment
 ingress_nginx_enabled: false
-# ingres_nginx_host_network: true
+# ingress_nginx_host_network: false
 # ingress_nginx_namespace: "ingress-nginx"
 # ingress_nginx_insecure_port: 80
 # ingress_nginx_secure_port: 443
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
index dce234f6c..fc114a2ba 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml
@@ -6,6 +6,7 @@ ingress_nginx_controller_image_repo: quay.io/kubernetes-ingress-controller/nginx
 ingress_nginx_controller_image_tag: 0.11.0
 
 ingress_nginx_namespace: "ingress-nginx"
+ingress_nginx_host_network: false
 ingress_nginx_insecure_port: 80
 ingress_nginx_secure_port: 443
 ingress_nginx_configmap: {}
diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
index 289119a60..b88bb9d6f 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
@@ -21,10 +21,10 @@ spec:
         k8s-app: ingress-nginx
         version: v{{ ingress_nginx_controller_image_tag }}
     spec:
-{% if ingres_nginx_host_network is defined and ingres_nginx_host_network %}
+{% if ingress_nginx_host_network %}
       hostNetwork: true
 {% endif %}
-{% if 'kube-ingress' in groups %}
+{% if 'kube-ingress' in groups and groups['kube-ingress']|length > 0 %}
       nodeSelector:
         node-role.kubernetes.io/ingress: "true"
 {% endif %}

From 14ac7d797b9573bc7a78a28e983080c1cf580ebb Mon Sep 17 00:00:00 2001
From: Andreas Holmsten
Date: Mon, 19 Mar 2018 13:04:18 +0100
Subject: [PATCH 089/177] Rotate local-volume-provisioner token

When tokens need to rotate, include local-volume-provisioner
---
 roles/kubernetes-apps/rotate_tokens/tasks/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 52101ae16..4abc7d730 100644
---
a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml +++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml @@ -34,7 +34,7 @@ {{ bin_dir }}/kubectl get secrets --all-namespaces -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}' | grep kubernetes.io/service-account-token - | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller' + | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|efk|tiller|local-volume-provisioner' register: tokens_to_delete when: needs_rotation From ebfee51acac7e9361a31cd86a8dfc56b76f665a9 Mon Sep 17 00:00:00 2001 From: Zobair Shahadat Date: Mon, 19 Mar 2018 15:42:24 -0400 Subject: [PATCH 090/177] Upgraded kubernetes from 1.9.3 to 1.9.5 --- README.md | 2 +- inventory/sample/group_vars/k8s-cluster.yml | 2 +- roles/download/defaults/main.yml | 4 ++-- roles/kubespray-defaults/defaults/main.yaml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index c9ff3f2c9..91843ce13 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ Note: Upstart/SysV init based OS types are not supported. Versions of supported components -------------------------------- -- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.3 +- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 - [flanneld](https://github.com/coreos/flannel/releases) v0.9.1 - [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index b0a602553..b6586c862 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users" kube_api_anonymous_auth: true ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.9.3 +kube_version: v1.9.5 # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index b43a5aa0b..a015fe92d 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -24,7 +24,7 @@ download_always_pull: False download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" # Versions -kube_version: v1.9.3 +kube_version: v1.9.5 kubeadm_version: "{{ kube_version }}" etcd_version: v3.2.4 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults @@ -50,7 +50,7 @@ vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/va # Checksums istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370 -kubeadm_checksum: 9ebbb1fbf3a9e72d7df3f0dc02500dc8f957f39489b22cf577498c8a7c6b39b1 +kubeadm_checksum: 12b6e9ac1624852b7c978bde70b9bde9ca0e4fc6581d09bddfb117bb41f93c74 vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188 # Containers diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index b0203676d..7147b5470 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -14,7 +14,7 @@ is_atomic: false ## Change this to use another Kubernetes version, e.g. 
a current beta release
-kube_version: v1.9.3
+kube_version: v1.9.5
 
 ## Kube Proxy mode One of ['iptables','ipvs']
 kube_proxy_mode: iptables

From aa30fa8009e1b41176dbc1be3f0c99703171a7e6 Mon Sep 17 00:00:00 2001
From: gorazio
Date: Tue, 20 Mar 2018 08:47:36 +0300
Subject: [PATCH 091/177] Add prometheus annotations to spec in ingress

Added annotations from metadata to spec.template.metadata. Without it, the
pod does not get any annotations, and Prometheus does not see it.
---
 .../ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
index 7fd3a946c..3a4c7860b 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
@@ -20,6 +20,9 @@ spec:
       labels:
         k8s-app: ingress-nginx
         version: v{{ ingress_nginx_controller_image_tag }}
+      annotations:
+        prometheus.io/port: '10254'
+        prometheus.io/scrape: 'true'
     spec:
       containers:
         - name: ingress-nginx-controller

From 96e46c4209003bfa61decf9c40eed670d6eed704 Mon Sep 17 00:00:00 2001
From: gorazio
Date: Tue, 20 Mar 2018 10:23:50 +0300
Subject: [PATCH 092/177] bump after CLA signing
---
 .../ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
index 3a4c7860b..f8fac3b09 100644
--- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
+++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2
@@ -73,3 +73,4 @@ spec:
 {% if rbac_enabled %}
       serviceAccountName: ingress-nginx
 {% endif %}
+

From a94a407a4312138842c944f8bb60f90a0154399f Mon Sep 17 00:00:00 2001
From: woopstar
Date: Tue, 20 Mar 2018 12:08:34 +0100
Subject: [PATCH 093/177] Fix duplicate --proxy-client-cert-file and --proxy-client-key-file
---
 .../master/templates/manifests/kube-apiserver.manifest.j2 | 2 --
 1 file changed, 2 deletions(-)

diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index c1685410d..4f8bb6d4f 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -57,8 +57,6 @@ spec:
 {% endif %}
     - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
     - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
-    - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
-    - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
 {% if kube_token_auth|default(true) %}
     - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
 {% endif %}

From b787b76c6c58459a5cb52700e4ce4bacbb5b8d97 Mon Sep 17 00:00:00 2001
From: Bharat Kunwar
Date: Tue, 20 Mar 2018 12:06:34 +0000
Subject: [PATCH 094/177] Update kube-apiserver.manifest.j2

Ensure that kube-apiserver will respond even if one of the nodes is down.
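
For reference, one way to observe the effect of the lease reconciler after
rolling this change out (a sketch only; it assumes kubectl access to the
cluster and a multi-master deployment):

```console
# The "kubernetes" endpoints object lists the registered apiserver addresses.
# With the lease reconciler, the address of a master that goes down should be
# removed from this list once its lease expires, instead of lingering forever.
$ kubectl --namespace=default get endpoints kubernetes -o yaml
```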
--- .../master/templates/manifests/kube-apiserver.manifest.j2 | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index c1685410d..1d9d843fb 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -42,6 +42,7 @@ spec: - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }} - --bind-address={{ kube_apiserver_bind_address }} - --apiserver-count={{ kube_apiserver_count }} + - --endpoint-reconciler-type=lease - --admission-control={{ kube_apiserver_admission_control | join(',') }} - --service-cluster-ip-range={{ kube_service_addresses }} - --service-node-port-range={{ kube_apiserver_node_port_range }} From d9453f323b3c591e8fc1a05d3b85559527c05f46 Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Tue, 20 Mar 2018 12:16:35 +0000 Subject: [PATCH 095/177] Update kube-apiserver.manifest.j2 --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 1d9d843fb..45eaf6db0 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -42,7 +42,9 @@ spec: - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }} - --bind-address={{ kube_apiserver_bind_address }} - --apiserver-count={{ kube_apiserver_count }} +{% if kube_version | version_compare('v1.9', '>=') %} - --endpoint-reconciler-type=lease +{% endif %} - --admission-control={{ kube_apiserver_admission_control | join(',') }} - --service-cluster-ip-range={{ kube_service_addresses }} - --service-node-port-range={{ kube_apiserver_node_port_range }} From d2fd7b74623cb45397f1e0dcd9f55e8a29b7c7f4 Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Tue, 20 Mar 2018 12:19:53 +0000 Subject: [PATCH 096/177] Update kube-apiserver.manifest.j2 --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 45eaf6db0..350eeaabd 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -42,9 +42,9 @@ spec: - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }} - --bind-address={{ kube_apiserver_bind_address }} - --apiserver-count={{ kube_apiserver_count }} -{% if kube_version | version_compare('v1.9', '>=') %} +{% if kube_version | version_compare('v1.9', '>=') %} - --endpoint-reconciler-type=lease -{% endif %} +{% endif %} - --admission-control={{ kube_apiserver_admission_control | join(',') }} - --service-cluster-ip-range={{ kube_service_addresses }} - --service-node-port-range={{ kube_apiserver_node_port_range }} From 6c4e5e0e3d020de86ec19fc744206fc79e84d0e0 Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Tue, 20 Mar 2018 13:30:57 +0000 Subject: [PATCH 097/177] Update kubeadm-config.yaml.j2 --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git 
a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index a15107c94..d3b77bb9b 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -36,6 +36,9 @@ apiServerExtraArgs: insecure-port: "{{ kube_apiserver_insecure_port }}" admission-control: {{ kube_apiserver_admission_control | join(',') }} apiserver-count: "{{ kube_apiserver_count }}" +{% if kube_version | version_compare('v1.9', '>=') %} + endpoint-reconciler-type: lease +{% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" {% if kube_basic_auth|default(true) %} From 13e47e73c8c890bc05ce2b6cad74161c6a74e49a Mon Sep 17 00:00:00 2001 From: Bharat Kunwar Date: Tue, 20 Mar 2018 13:33:36 +0000 Subject: [PATCH 098/177] Update kubeadm-config.yaml.j2 As requested --- roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index a15107c94..844421d32 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -36,6 +36,9 @@ apiServerExtraArgs: insecure-port: "{{ kube_apiserver_insecure_port }}" admission-control: {{ kube_apiserver_admission_control | join(',') }} apiserver-count: "{{ kube_apiserver_count }}" +{% if kube_version | version_compare('v1.9', '>=') %} + endpoint-reconciler-type: lease +{% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" {% if kube_basic_auth|default(true) %} From 9d540165c0416d4e078cd2af2b2fc0b849aa6207 Mon Sep 17 00:00:00 2001 From: woopstar Date: Tue, 20 Mar 2018 16:28:01 +0100 Subject: [PATCH 099/177] Set kube_api_aggregator_routing to default false as we use kube-proxy --- roles/kubespray-defaults/defaults/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index b0203676d..a907beddd 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -128,7 +128,7 @@ kube_apiserver_insecure_bind_address: 127.0.0.1 kube_apiserver_insecure_port: 8080 # Aggregator -kube_api_aggregator_routing: true +kube_api_aggregator_routing: false # Path used to store Docker data docker_daemon_graph: "/var/lib/docker" From 158d7753069ac95f2a47d8b4cba2cf2fb793a603 Mon Sep 17 00:00:00 2001 From: melkosoft Date: Tue, 20 Mar 2018 12:43:26 -0700 Subject: [PATCH 100/177] changed cilium to 1.0.0-rc7. 
Set CI to use coreos for cilium test --- .gitlab-ci.yml | 6 +++--- roles/download/defaults/main.yml | 2 +- roles/network_plugin/cilium/templates/cilium-ds.yml.j2 | 7 +++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c21bb0c43..510cfa6d9 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -257,7 +257,7 @@ before_script: # stage: deploy-special MOVED_TO_GROUP_VARS: "true" -.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables +.coreos_cilium_variables: &coreos_cilium_variables # stage: deploy-special MOVED_TO_GROUP_VARS: "true" @@ -459,13 +459,13 @@ gce_ubuntu-contiv-sep: except: ['triggers'] only: ['master', /^pr-.*$/] -gce_ubuntu-cilium-sep: +gce_coreos-cilium: stage: deploy-special <<: *job <<: *gce variables: <<: *gce_variables - <<: *ubuntu_cilium_sep_variables + <<: *coreos_cilium_variables when: manual except: ['triggers'] only: ['master', /^pr-.*$/] diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index b43a5aa0b..97e93647c 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -41,7 +41,7 @@ vault_version: 0.8.1 weave_version: 2.2.0 pod_infra_version: 3.0 contiv_version: 1.1.7 -cilium_version: "v1.0.0-rc4" +cilium_version: "v1.0.0-rc7" # Download URLs istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux" diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 index 9f48a62db..3d877a5cb 100755 --- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 @@ -79,6 +79,13 @@ spec: optional: true key: prometheus-serve-addr {% endif %} + resources: + limits: + cpu: {{ cilium_cpu_limit }} + memory: {{ cilium_memory_limit }} + requests: + cpu: {{ cilium_cpu_requests }} + memory: {{ cilium_memory_requests }} livenessProbe: exec: command: From ae30009fbc766991b93a26d6ecaedcbe33732098 Mon Sep 17 00:00:00 2001 From: melkosoft Date: Tue, 20 Mar 2018 14:18:56 -0700 Subject: [PATCH 101/177] changed version to 1.0.0-rc8 --- roles/download/defaults/main.yml | 2 +- roles/network_plugin/cilium/templates/cilium-cr.yml.j2 | 2 ++ tests/files/gce_centos7-cilium.yml | 1 + tests/files/gce_coreos-cilium.yml | 1 + tests/files/gce_rhel7-cilium.yml | 1 + tests/files/gce_ubuntu-cilium-sep.yml | 1 + 6 files changed, 7 insertions(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 97e93647c..099d9993c 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -41,7 +41,7 @@ vault_version: 0.8.1 weave_version: 2.2.0 pod_infra_version: 3.0 contiv_version: 1.1.7 -cilium_version: "v1.0.0-rc7" +cilium_version: "v1.0.0-rc8" # Download URLs istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux" diff --git a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 index 8eae0e8ed..11fd01087 100755 --- a/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium-cr.yml.j2 @@ -54,9 +54,11 @@ rules: - get - list - watch + - update - apiGroups: - cilium.io resources: - ciliumnetworkpolicies + - ciliumendpoints verbs: - "*" diff --git a/tests/files/gce_centos7-cilium.yml b/tests/files/gce_centos7-cilium.yml index ca682f7ed..ec46a213d 100644 --- 
a/tests/files/gce_centos7-cilium.yml +++ b/tests/files/gce_centos7-cilium.yml @@ -7,5 +7,6 @@ mode: default # Deployment settings kube_network_plugin: cilium deploy_netchecker: true +enable_network_policy: true kubedns_min_replicas: 1 cloud_provider: gce diff --git a/tests/files/gce_coreos-cilium.yml b/tests/files/gce_coreos-cilium.yml index a09003970..1778929f0 100644 --- a/tests/files/gce_coreos-cilium.yml +++ b/tests/files/gce_coreos-cilium.yml @@ -9,5 +9,6 @@ kube_network_plugin: cilium bootstrap_os: coreos resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12 deploy_netchecker: true +enable_network_policy: true kubedns_min_replicas: 1 cloud_provider: gce diff --git a/tests/files/gce_rhel7-cilium.yml b/tests/files/gce_rhel7-cilium.yml index d67658a6c..0994d0099 100644 --- a/tests/files/gce_rhel7-cilium.yml +++ b/tests/files/gce_rhel7-cilium.yml @@ -6,5 +6,6 @@ mode: default # Deployment settings kube_network_plugin: cilium deploy_netchecker: true +enable_network_policy: true kubedns_min_replicas: 1 cloud_provider: gce diff --git a/tests/files/gce_ubuntu-cilium-sep.yml b/tests/files/gce_ubuntu-cilium-sep.yml index e7150a27e..0c0647743 100644 --- a/tests/files/gce_ubuntu-cilium-sep.yml +++ b/tests/files/gce_ubuntu-cilium-sep.yml @@ -6,6 +6,7 @@ mode: separate # Deployment settings kube_network_plugin: cilium deploy_netchecker: true +enable_network_policy: true kubedns_min_replicas: 1 cloud_provider: gce From c025ab4eb4dfad300b08c6a735cff018c233f4b6 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Tue, 20 Mar 2018 19:59:51 -0500 Subject: [PATCH 102/177] Update flannel version to v0.10.0 --- README.md | 2 +- roles/download/defaults/main.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c9ff3f2c9..84afb1be6 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ Versions of supported components - [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.3 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 -- [flanneld](https://github.com/coreos/flannel/releases) v0.9.1 +- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0 - [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [cilium](https://github.com/cilium/cilium) v1.0.0-rc4 diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index b43a5aa0b..f9578ae72 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -34,7 +34,7 @@ calico_ctl_version: "v1.6.1" calico_cni_version: "v1.11.0" calico_policy_version: "v1.0.0" calico_rr_version: "v0.4.0" -flannel_version: "v0.9.1" +flannel_version: "v0.10.0" flannel_cni_version: "v0.3.0" istio_version: "0.2.6" vault_version: 0.8.1 From 6425c837d526e01fea4d5dc01c34683b18d2860b Mon Sep 17 00:00:00 2001 From: melkosoft Date: Wed, 21 Mar 2018 04:39:29 +0000 Subject: [PATCH 103/177] Added GCE Cilium Ubuntu test --- .gitlab-ci.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 510cfa6d9..1014440ab 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -261,6 +261,10 @@ before_script: # stage: deploy-special MOVED_TO_GROUP_VARS: "true" +.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables +# stage: deploy-special + MOVED_TO_GROUP_VARS: "true" + .rhel7_weave_variables: &rhel7_weave_variables # stage: deploy-part1 MOVED_TO_GROUP_VARS: "true" @@ -470,6 +474,17 @@ 
gce_coreos-cilium: except: ['triggers'] only: ['master', /^pr-.*$/] +gce_ubuntu-cilium-sep: + stage: deploy-special + <<: *job + <<: *gce + variables: + <<: *gce_variables + <<: *ubuntu_cilium_sep_variables + when: manual + except: ['triggers'] + only: ['master', /^pr-.*$/] + gce_rhel7-weave: stage: deploy-part2 <<: *job From ee8f678010e07881de3065cdddd746a0412556dd Mon Sep 17 00:00:00 2001 From: mirwan Date: Wed, 21 Mar 2018 08:50:32 +0100 Subject: [PATCH 104/177] Addition of the .creds extension to the credentials files generated by password lookup in order for Ansible not to consider them as inventory files with inventory_ignore_extensions set accordingly (#2446) --- ansible.cfg | 1 + docs/getting-started.md | 2 +- docs/vars.md | 2 +- inventory/sample/group_vars/k8s-cluster.yml | 2 +- roles/kubernetes/master/defaults/main.yml | 2 +- roles/vault/defaults/main.yml | 12 ++++++------ tests/testcases/010_check-apiserver.yml | 2 +- 7 files changed, 12 insertions(+), 11 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index 732e3bf6e..d3102a6f4 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -12,3 +12,4 @@ library = ./library callback_whitelist = profile_tasks roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles deprecation_warnings=False +inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds diff --git a/docs/getting-started.md b/docs/getting-started.md index d93f29697..2402ac54f 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -74,7 +74,7 @@ authentication. One could generate a kubeconfig based on one installed kube-master hosts (needs improvement) or connect with a username and password. By default, a user with admin rights is created, named `kube`. The password can be viewed after deployment by looking at the file -`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated +`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated password. If you wish to set your own password, just precreate/modify this file yourself. diff --git a/docs/vars.md b/docs/vars.md index f612b4f52..5ea76b0e5 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -137,6 +137,6 @@ The possible vars are: By default, a user with admin rights is created, named `kube`. The password can be viewed after deployment by looking at the file -`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated +`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated password. If you wish to set your own password, just precreate/modify this file yourself or change `kube_api_pwd` var. 
diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index b0a602553..3936cac05 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -37,7 +37,7 @@ kube_log_level: 2 # Users to create for basic auth in Kubernetes API via HTTP # Optionally add groups for user -kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}" +kube_api_pwd: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}" kube_users: kube: pass: "{{kube_api_pwd}}" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index a1b506d4e..303c1a88a 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -95,5 +95,5 @@ volume_cross_zone_attachment: false ## Encrypting Secret Data at Rest kube_encrypt_secret_data: false -kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token length=32 chars=ascii_letters,digits') }}" +kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 4eb055f7e..9a3e83035 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -115,7 +115,7 @@ vault_pki_mounts: roles: - name: vault group: vault - password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault length=15') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/vault/vault.creds length=15') }}" policy_rules: default role_options: default etcd: @@ -127,7 +127,7 @@ vault_pki_mounts: roles: - name: etcd group: etcd - password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd length=15') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/vault/etcd.creds length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -142,7 +142,7 @@ vault_pki_mounts: roles: - name: kube-master group: kube-master - password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master length=15') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-master.creds length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -150,7 +150,7 @@ vault_pki_mounts: organization: "system:masters" - name: kube-node group: k8s-cluster - password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node length=15') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-node.creds length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -158,7 +158,7 @@ vault_pki_mounts: organization: "system:nodes" - name: kube-proxy group: k8s-cluster - password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy.creds length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -166,7 +166,7 @@ vault_pki_mounts: organization: "system:node-proxier" - name: front-proxy-client group: k8s-cluster - password: "{{ lookup('password', inventory_dir + '/credentials/vault/kube-proxy length=15') }}" + password: "{{ lookup('password', inventory_dir + 
'/credentials/vault/kube-proxy.creds length=15') }}" policy_rules: default role_options: allow_any_name: true diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index de5e3a84a..68ea2e35d 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -6,7 +6,7 @@ uri: url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1" user: kube - password: "{{ lookup('password', inventory_dir + '/credentials/kube_user length=15 chars=ascii_letters,digits') }}" + password: "{{ lookup('password', inventory_dir + '/credentials/kube_user.creds length=15 chars=ascii_letters,digits') }}" validate_certs: no status_code: 200,401 when: not kubeadm_enabled|default(false) From 8b71ef8ceb46bcc93ad547f4ccfd452c53d40bee Mon Sep 17 00:00:00 2001 From: Erwan Miran Date: Wed, 21 Mar 2018 09:19:05 +0100 Subject: [PATCH 105/177] Labels from role (node-role.k8s.io/node) and labels from inventory are merged into node-labels parameter in kubelet --- docs/vars.md | 2 ++ .../node/templates/kubelet.standard.env.j2 | 16 ++++++++++++---- tests/ansible.cfg | 1 + 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docs/vars.md b/docs/vars.md index 5ea76b0e5..f4956c882 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -118,6 +118,8 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st * *kubelet_cgroup_driver* - Allows manual override of the cgroup-driver option for Kubelet. By default autodetection is used to match Docker configuration. +* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. + For example, labels can be set in the inventory as variables or more widely in group_vars ##### Custom flags for Kube Components For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. 
Example: diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index d33adfba7..05874a5de 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -81,16 +81,24 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {# Kubelet node labels #} +{% set role_node_labels = [] %} {% if inventory_hostname in groups['kube-master'] %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/master=true') %} {% if not standalone_kubelet|bool %} -{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% else %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} +{% set inventory_node_labels = [] %} +{% if node_labels is defined %} +{% for labelname, labelvalue in node_labels.iteritems() %} +{% do inventory_node_labels.append(labelname + '=' + labelvalue) %} +{% endfor %} +{% endif %} +{% set all_node_labels = role_node_labels + inventory_node_labels %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/tests/ansible.cfg b/tests/ansible.cfg index 9e734403e..9c4057529 100644 --- a/tests/ansible.cfg +++ b/tests/ansible.cfg @@ -10,3 +10,4 @@ fact_caching_connection = /tmp stdout_callback = skippy library = ./library:../library callback_whitelist = profile_tasks +jinja2_extensions = jinja2.ext.do From d3780e181ef8cec712b843f0db4a8997c226fe03 Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 21 Mar 2018 23:27:13 +0100 Subject: [PATCH 106/177] Switch hyperkube from CoreOS to Google --- roles/download/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index a9c767dc0..10522d9ec 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -70,8 +70,8 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" -hyperkube_image_repo: "quay.io/coreos/hyperkube" -hyperkube_image_tag: "{{ kube_version }}_coreos.0" 
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube" +hyperkube_image_tag: "v{{ kube_version }}" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" install_socat_image_repo: "xueshanf/install-socat" From 405c711edb76392ca2cedff6ca392f95cd51bc8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Thu, 22 Mar 2018 09:07:28 +0100 Subject: [PATCH 107/177] Remove v in tag --- roles/download/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 10522d9ec..1cfdd3e93 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -71,7 +71,7 @@ calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "{{ calico_rr_version }}" hyperkube_image_repo: "gcr.io/google-containers/hyperkube" -hyperkube_image_tag: "v{{ kube_version }}" +hyperkube_image_tag: "{{ kube_version }}" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" install_socat_image_repo: "xueshanf/install-socat" From 6ac7840195c072975e7c37c8b7484e3a3ba69df5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Thu, 22 Mar 2018 11:59:58 +0100 Subject: [PATCH 108/177] Update to correct versions in README Some of the versions does not match. Fix this. --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 83fd78faf..d8cf41a4d 100644 --- a/README.md +++ b/README.md @@ -79,11 +79,11 @@ Versions of supported components - [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 - [flanneld](https://github.com/coreos/flannel/releases) v0.10.0 -- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 +- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.6.2 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) -- [cilium](https://github.com/cilium/cilium) v1.0.0-rc4 -- [contiv](https://github.com/contiv/install/releases) v1.0.3 -- [weave](http://weave.works/) v2.0.1 +- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8 +- [contiv](https://github.com/contiv/install/releases) v1.1.7 +- [weave](http://weave.works/) v2.2.0 - [docker](https://www.docker.com/) v17.03 (see note) - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2) From f8ebd08e7542d152d684d98e9a1d140cfc3c7695 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Fri, 9 Feb 2018 15:47:57 +0800 Subject: [PATCH 109/177] Registry Addon Fixup --- inventory/sample/group_vars/k8s-cluster.yml | 3 + roles/kubernetes-apps/registry/README.md | 89 +++++++------- .../registry/defaults/main.yml | 4 + .../registry/files/images/Dockerfile | 26 ---- .../registry/files/images/Makefile | 24 ---- .../registry/files/images/rootfs/bin/boot | 23 ---- .../rootfs/etc/nginx/conf.d/default.conf.in | 28 ----- .../rootfs/etc/nginx/docker-registry.conf | 6 - .../files/images/rootfs/etc/nginx/nginx.conf | 26 ---- roles/kubernetes-apps/registry/tasks/main.yml | 37 +++++- .../registry/templates/auth/README.md | 92 -------------- .../templates/auth/registry-auth-rc.yml.j2 | 56 --------- .../registry/templates/gcs/README.md | 81 ------------ .../templates/gcs/registry-gcs-rc.yml.j2 | 52 -------- .../registry/templates/registry-ns.yml.j2 | 5 + ...try-ds.yml.j2 => 
registry-proxy-ds.yml.j2} | 21 ++-- .../registry/templates/registry-pv.yml.j2 | 17 --- .../registry/templates/registry-pvc.yml.j2 | 10 +- ...{registry-rc.yml.j2 => registry-rs.yml.j2} | 27 ++-- .../registry/templates/registry-svc.yml.j2 | 14 +-- .../registry/templates/tls/README.md | 116 ------------------ .../templates/tls/registry-tls-rc.yml.j2 | 57 --------- .../templates/tls/registry-tls-svc.yml.j2 | 17 --- 23 files changed, 135 insertions(+), 696 deletions(-) delete mode 100644 roles/kubernetes-apps/registry/files/images/Dockerfile delete mode 100644 roles/kubernetes-apps/registry/files/images/Makefile delete mode 100755 roles/kubernetes-apps/registry/files/images/rootfs/bin/boot delete mode 100644 roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in delete mode 100644 roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf delete mode 100644 roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf delete mode 100644 roles/kubernetes-apps/registry/templates/auth/README.md delete mode 100644 roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 delete mode 100644 roles/kubernetes-apps/registry/templates/gcs/README.md delete mode 100644 roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 create mode 100644 roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 rename roles/kubernetes-apps/registry/templates/{registry-ds.yml.j2 => registry-proxy-ds.yml.j2} (56%) delete mode 100644 roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 rename roles/kubernetes-apps/registry/templates/{registry-rc.yml.j2 => registry-rs.yml.j2} (62%) delete mode 100644 roles/kubernetes-apps/registry/templates/tls/README.md delete mode 100644 roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 delete mode 100644 roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 8808aecfe..32b872ab5 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -171,6 +171,9 @@ istio_enabled: false # Registry deployment registry_enabled: false +# registry_namespace: "{{ system_namespace }}" +# registry_storage_class: "" +# registry_disk_size: "10Gi" # Local volume provisioner deployment local_volume_provisioner_enabled: false diff --git a/roles/kubernetes-apps/registry/README.md b/roles/kubernetes-apps/registry/README.md index 59542355e..81615631e 100644 --- a/roles/kubernetes-apps/registry/README.md +++ b/roles/kubernetes-apps/registry/README.md @@ -1,36 +1,39 @@ -# Private Docker Registry in Kubernetes +Private Docker Registry in Kubernetes +===================================== Kubernetes offers an optional private Docker registry addon, which you can turn -on when you bring up a cluster or install later. This gives you a place to +on when you bring up a cluster or install later. This gives you a place to store truly private Docker images for your cluster. -## How it works +How it works +------------ -The private registry runs as a `Pod` in your cluster. It does not currently +The private registry runs as a `Pod` in your cluster. It does not currently support SSL or authentication, which triggers Docker's "insecure registry" -logic. To work around this, we run a proxy on each node in the cluster, +logic. 
To work around this, we run a proxy on each node in the cluster, exposing a port onto the node (via a hostPort), which Docker accepts as "secure", since it is accessed by `localhost`. -## Turning it on +Turning it on +------------- -Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The +Some cluster installs (e.g. GCE) support this as a cluster-birth flag. The `ENABLE_CLUSTER_REGISTRY` variable in `cluster/gce/config-default.sh` governs -whether the registry is run or not. To set this flag, you can specify -`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster -does not include this flag, the following steps should work. Note that some of +whether the registry is run or not. To set this flag, you can specify +`KUBE_ENABLE_CLUSTER_REGISTRY=true` when running `kube-up.sh`. If your cluster +does not include this flag, the following steps should work. Note that some of this is cloud-provider specific, so you may have to customize it a bit. ### Make some storage -The primary job of the registry is to store data. To do that we have to decide -where to store it. For cloud environments that have networked storage, we can -use Kubernetes's `PersistentVolume` abstraction. The following template is +The primary job of the registry is to store data. To do that we have to decide +where to store it. For cloud environments that have networked storage, we can +use Kubernetes's `PersistentVolume` abstraction. The following template is expanded by `salt` in the GCE cluster turnup, but can easily be adapted to other situations: -```yaml +``` yaml kind: PersistentVolume apiVersion: v1 metadata: @@ -64,14 +67,15 @@ just want to kick the tires on this without committing to it, you can easily adapt the `ReplicationController` specification below to use a simple `emptyDir` volume instead of a `persistentVolumeClaim`. -## Claim the storage +Claim the storage +----------------- Now that the Kubernetes cluster knows that some storage exists, you can put a -claim on that storage. As with the `PersistentVolume` above, you can start +claim on that storage. As with the `PersistentVolume` above, you can start with the `salt` template: -```yaml +``` yaml kind: PersistentVolumeClaim apiVersion: v1 metadata: @@ -90,15 +94,16 @@ spec: This tells Kubernetes that you want to use storage, and the `PersistentVolume` you created before will be bound to this claim (unless you have other -`PersistentVolumes` in which case those might get bound instead). This claim +`PersistentVolumes` in which case those might get bound instead). This claim gives you the right to use this storage until you release the claim. -## Run the registry +Run the registry +---------------- Now we can run a Docker registry: -```yaml +``` yaml apiVersion: v1 kind: ReplicationController metadata: @@ -146,12 +151,13 @@ spec: ``` -## Expose the registry in the cluster +Expose the registry in the cluster +---------------------------------- Now that we have a registry `Pod` running, we can expose it as a Service: -```yaml +``` yaml apiVersion: v1 kind: Service metadata: @@ -171,14 +177,15 @@ spec: ``` -## Expose the registry on each node +Expose the registry on each node +-------------------------------- Now that we have a running `Service`, we need to expose it onto each Kubernetes -`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every +`Node` so that Docker will see it as `localhost`. We can load a `Pod` on every node by creating following daemonset. 
-```yaml +``` yaml apiVersion: extensions/v1beta1 kind: DaemonSet metadata: @@ -217,7 +224,7 @@ spec: When modifying replication-controller, service and daemon-set defintions, take -care to ensure _unique_ identifiers for the rc-svc couple and the daemon-set. +care to ensure *unique* identifiers for the rc-svc couple and the daemon-set. Failing to do so will have register the localhost proxy daemon-sets to the upstream service. As a result they will then try to proxy themselves, which will, for obvious reasons, not work. @@ -226,29 +233,30 @@ This ensures that port 5000 on each node is directed to the registry `Service`. You should be able to verify that it is running by hitting port 5000 with a web browser and getting a 404 error: -```console +``` console $ curl localhost:5000 404 page not found ``` -## Using the registry +Using the registry +------------------ To use an image hosted by this registry, simply say this in your `Pod`'s `spec.containers[].image` field: -```yaml +``` yaml image: localhost:5000/user/container ``` Before you can use the registry, you have to be able to get images into it, -though. If you are building an image on your Kubernetes `Node`, you can spell -out `localhost:5000` when you build and push. More likely, though, you are +though. If you are building an image on your Kubernetes `Node`, you can spell +out `localhost:5000` when you build and push. More likely, though, you are building locally and want to push to your cluster. You can use `kubectl` to set up a port-forward from your local node to a running Pod: -```console +``` console $ POD=$(kubectl get pods --namespace kube-system -l k8s-app=kube-registry-upstream \ -o template --template '{{range .items}}{{.metadata.name}} {{.status.phase}}{{"\n"}}{{end}}' \ | grep Running | head -1 | cut -f1 -d' ') @@ -260,15 +268,14 @@ Now you can build and push images on your local computer as `localhost:5000/yourname/container` and those images will be available inside your kubernetes cluster with the same name. -# More Extensions +More Extensions +=============== -- [Use GCS as storage backend](gcs/README.md) -- [Enable TLS/SSL](tls/README.md) -- [Enable Authentication](auth/README.md) +- [Use GCS as storage backend](gcs/README.md) +- [Enable TLS/SSL](tls/README.md) +- [Enable Authentication](auth/README.md) -## Future improvements +Future improvements +------------------- -* Allow port-forwarding to a Service rather than a pod (#15180) - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/README.md?pixel)]() +- Allow port-forwarding to a Service rather than a pod (\#15180) diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml index d13290b3b..93d1cfa2a 100644 --- a/roles/kubernetes-apps/registry/defaults/main.yml +++ b/roles/kubernetes-apps/registry/defaults/main.yml @@ -3,3 +3,7 @@ registry_image_repo: registry registry_image_tag: 2.6 registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy registry_proxy_image_tag: 0.4 + +registry_namespace: "{{ system_namespace }}" +registry_storage_class: "" +registry_disk_size: "10Gi" diff --git a/roles/kubernetes-apps/registry/files/images/Dockerfile b/roles/kubernetes-apps/registry/files/images/Dockerfile deleted file mode 100644 index 4223025a8..000000000 --- a/roles/kubernetes-apps/registry/files/images/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 The Kubernetes Authors. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM nginx:1.12 - -RUN apt-get update \ - && apt-get install -y \ - curl \ - --no-install-recommends \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* /usr/share/man /usr/share/doc - -COPY rootfs / - -CMD ["/bin/boot"] diff --git a/roles/kubernetes-apps/registry/files/images/Makefile b/roles/kubernetes-apps/registry/files/images/Makefile deleted file mode 100644 index c1b64de1c..000000000 --- a/roles/kubernetes-apps/registry/files/images/Makefile +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -.PHONY: build push vet test clean - -TAG = 0.4 -REPO = gcr.io/google_containers/kube-registry-proxy - -build: - docker build --pull -t $(REPO):$(TAG) . - -push: - gcloud docker -- push $(REPO):$(TAG) diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot b/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot deleted file mode 100755 index 04262b464..000000000 --- a/roles/kubernetes-apps/registry/files/images/rootfs/bin/boot +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# fail if no hostname is provided -REGISTRY_HOST=${REGISTRY_HOST:?no host} -REGISTRY_PORT=${REGISTRY_PORT:-5000} - -# we are always listening on port 80 -# https://github.com/nginxinc/docker-nginx/blob/43c112100750cbd1e9f2160324c64988e7920ac9/stable/jessie/Dockerfile#L25 -PORT=80 - -sed -e "s/%HOST%/$REGISTRY_HOST/g" \ - -e "s/%PORT%/$REGISTRY_PORT/g" \ - -e "s/%BIND_PORT%/$PORT/g" \ - /etc/nginx/conf.d/default.conf - -# wait for registry to come online -while ! 
curl -sS "$REGISTRY_HOST:$REGISTRY_PORT" &>/dev/null; do - printf "waiting for the registry (%s:%s) to come online...\n" "$REGISTRY_HOST" "$REGISTRY_PORT" - sleep 1 -done - -printf "starting proxy...\n" -exec nginx -g "daemon off;" "$@" diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in deleted file mode 100644 index ecd95fd2f..000000000 --- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/conf.d/default.conf.in +++ /dev/null @@ -1,28 +0,0 @@ -# Docker registry proxy for api version 2 - -upstream docker-registry { - server %HOST%:%PORT%; -} - -# No client auth or TLS -# TODO(bacongobbler): experiment with authenticating the registry if it's using TLS -server { - listen %BIND_PORT%; - server_name localhost; - - # disable any limits to avoid HTTP 413 for large image uploads - client_max_body_size 0; - - # required to avoid HTTP 411: see Issue #1486 (https://github.com/docker/docker/issues/1486) - chunked_transfer_encoding on; - - location / { - # Do not allow connections from docker 1.5 and earlier - # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents - if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { - return 404; - } - - include docker-registry.conf; - } -} diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf deleted file mode 100644 index 7dc8cfff2..000000000 --- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/docker-registry.conf +++ /dev/null @@ -1,6 +0,0 @@ -proxy_pass http://docker-registry; -proxy_set_header Host $http_host; # required for docker client's sake -proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP -proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; -proxy_set_header X-Forwarded-Proto $scheme; -proxy_read_timeout 900; diff --git a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf b/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf deleted file mode 100644 index 54ecc888e..000000000 --- a/roles/kubernetes-apps/registry/files/images/rootfs/etc/nginx/nginx.conf +++ /dev/null @@ -1,26 +0,0 @@ -user nginx; -worker_processes auto; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 1024; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - - sendfile on; - - keepalive_timeout 65; - - include /etc/nginx/conf.d/*.conf; -} diff --git a/roles/kubernetes-apps/registry/tasks/main.yml b/roles/kubernetes-apps/registry/tasks/main.yml index a236d273c..a175064ee 100644 --- a/roles/kubernetes-apps/registry/tasks/main.yml +++ b/roles/kubernetes-apps/registry/tasks/main.yml @@ -3,29 +3,56 @@ - name: Registry | Create addon dir file: path: "{{ kube_config_dir }}/addons/registry" + state: directory owner: root group: root mode: 0755 - recurse: true - name: Registry | Create manifests template: src: "{{ item.file }}.j2" dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" with_items: - - { name: registry-svc, file: registry-svc.yml, type: service } - - { name: registry-rc, 
file: registry-rc.yml, type: replicationcontroller } - - { name: registry-ds, file: registry-ds.yml, type: daemonset } + - { name: registry-ns, file: registry-ns.yml, type: ns } + - { name: registry-svc, file: registry-svc.yml, type: svc } + - { name: registry-rs, file: registry-rs.yml, type: rs } + - { name: registry-proxy-ds, file: registry-proxy-ds.yml, type: ds } register: registry_manifests when: inventory_hostname == groups['kube-master'][0] - name: Registry | Apply manifests kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "{{ registry_namespace }}" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" state: "latest" with_items: "{{ registry_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] + +- name: Registry | Create PVC manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/registry/{{ item.file }}" + with_items: + - { name: registry-pvc, file: registry-pvc.yml, type: pvc } + register: registry_manifests + when: + - registry_storage_class != none + - registry_disk_size != none + - inventory_hostname == groups['kube-master'][0] + +- name: Registry | Apply PVC manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ registry_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/registry/{{ item.item.file }}" + state: "latest" + with_items: "{{ registry_manifests.results }}" + when: + - registry_storage_class != none + - registry_disk_size != none + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/registry/templates/auth/README.md b/roles/kubernetes-apps/registry/templates/auth/README.md deleted file mode 100644 index 040c54bcb..000000000 --- a/roles/kubernetes-apps/registry/templates/auth/README.md +++ /dev/null @@ -1,92 +0,0 @@ -# Enable Authentication with Htpasswd for Kube-Registry - -Docker registry support a few authentication providers. Full list of supported provider can be found [here](https://docs.docker.com/registry/configuration/#auth). This document describes how to enable authentication with htpasswd for kube-registry. - -### Prepare Htpasswd Secret - -Please generate your own htpasswd file. Assuming the file you generated is `htpasswd`. -Creating secret to hold htpasswd... -```console -$ kubectl --namespace=kube-system create secret generic registry-auth-secret --from-file=htpasswd=htpasswd -``` - -### Run Registry - -Please be noted that this sample rc is using emptyDir as storage backend for simplicity. 
- - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - - name: REGISTRY_AUTH_HTPASSWD_REALM - value: basic_realm - - name: REGISTRY_AUTH_HTPASSWD_PATH - value: /auth/htpasswd - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - - name: auth-dir - mountPath: /auth - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumes: - - name: image-store - emptyDir: {} - - name: auth-dir - secret: - secretName: registry-auth-secret -``` - - -No changes are needed for other components (kube-registry service and proxy). - -### To Verify - -Setup proxy or port-forwarding to the kube-registry. Image push/pull should fail without authentication. Then use `docker login` to authenticate with kube-registry and see if it works. - -### Configure Nodes to Authenticate with Kube-Registry - -By default, nodes assume no authentication is required by kube-registry. Without authentication, nodes cannot pull images from kube-registry. To solve this, more documentation can be found [Here](https://github.com/kubernetes/kubernetes.github.io/blob/master/docs/concepts/containers/images.md#configuring-nodes-to-authenticate-to-a-private-repository). 
- - - - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/auth/README.md?pixel)]() diff --git a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 deleted file mode 100644 index 1af623d09..000000000 --- a/roles/kubernetes-apps/registry/templates/auth/registry-auth-rc.yml.j2 +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - - name: REGISTRY_AUTH_HTPASSWD_REALM - value: basic_realm - - name: REGISTRY_AUTH_HTPASSWD_PATH - value: /auth/htpasswd - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - - name: auth-dir - mountPath: /auth - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumes: - - name: image-store - emptyDir: {} - - name: auth-dir - secret: - secretName: registry-auth-secret \ No newline at end of file diff --git a/roles/kubernetes-apps/registry/templates/gcs/README.md b/roles/kubernetes-apps/registry/templates/gcs/README.md deleted file mode 100644 index 5706a848f..000000000 --- a/roles/kubernetes-apps/registry/templates/gcs/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Kube-Registry with GCS storage backend - -Besides local file system, docker registry also supports a number of cloud storage backends. Full list of supported backend can be found [here](https://docs.docker.com/registry/configuration/#storage). This document describes how to enable GCS for kube-registry as storage backend. - -A few preparation steps are needed. - 1. Create a bucket named kube-registry in GCS. - 1. Create a service account for GCS access and create key file in json format. Detail instruction can be found [here](https://cloud.google.com/storage/docs/authentication#service_accounts). - - -### Pack Keyfile into a Secret - -Assuming you have downloaded the keyfile as `keyfile.json`. Create secret with the `keyfile.json`... 
-```console -$ kubectl --namespace=kube-system create secret generic gcs-key-secret --from-file=keyfile=keyfile.json -``` - - -### Run Registry - - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE - value: gcs - - name: REGISTRY_STORAGE_GCS_BUCKET - value: kube-registry - - name: REGISTRY_STORAGE_GCS_KEYFILE - value: /gcs/keyfile - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumeMounts: - - name: gcs-key - mountPath: /gcs - volumes: - - name: gcs-key - secret: - secretName: gcs-key-secret -``` - - - -No changes are needed for other components (kube-registry service and proxy). - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/gcs/README.md?pixel)]() diff --git a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 deleted file mode 100644 index e69740335..000000000 --- a/roles/kubernetes-apps/registry/templates/gcs/registry-gcs-rc.yml.j2 +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE - value: gcs - - name: REGISTRY_STORAGE_GCS_BUCKET - value: kube-registry - - name: REGISTRY_STORAGE_GCS_KEYFILE - value: /gcs/keyfile - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumeMounts: - - name: gcs-key - mountPath: /gcs - volumes: - - name: gcs-key - secret: - secretName: gcs-key-secret diff --git a/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 new file mode 100644 index 000000000..bb1b209e9 --- /dev/null +++ b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ registry_namespace }} diff --git a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 similarity index 56% rename from roles/kubernetes-apps/registry/templates/registry-ds.yml.j2 rename to roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 index 4d6a7800b..de55d3018 100644 --- a/roles/kubernetes-apps/registry/templates/registry-ds.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 @@ -1,28 +1,33 @@ --- 
-apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: - name: kube-registry-proxy - namespace: {{ system_namespace }} + name: registry-proxy + namespace: {{ registry_namespace }} labels: - k8s-app: kube-registry-proxy + k8s-app: registry-proxy kubernetes.io/cluster-service: "true" version: v{{ registry_proxy_image_tag }} spec: + selector: + matchLabels: + k8s-app: registry-proxy + version: v{{ registry_proxy_image_tag }} template: metadata: labels: - k8s-app: kube-registry-proxy - kubernetes.io/name: "kube-registry-proxy" + k8s-app: registry-proxy + kubernetes.io/name: "registry-proxy" kubernetes.io/cluster-service: "true" version: v{{ registry_proxy_image_tag }} spec: containers: - - name: kube-registry-proxy + - name: registry-proxy image: {{ registry_proxy_image_repo }}:{{ registry_proxy_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: REGISTRY_HOST - value: kube-registry.kube-system.svc.cluster.local + value: registry.{{ registry_namespace }}.svc.cluster.local - name: REGISTRY_PORT value: "5000" ports: diff --git a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 deleted file mode 100644 index 196efa928..000000000 --- a/roles/kubernetes-apps/registry/templates/registry-pv.yml.j2 +++ /dev/null @@ -1,17 +0,0 @@ -kind: PersistentVolume -apiVersion: v1 -metadata: - name: kube-system-kube-registry-pv - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: -{% if pillar.get('cluster_registry_disk_type', '') == 'gce' %} - capacity: - storage: {{ pillar['cluster_registry_disk_size'] }} - accessModes: - - ReadWriteOnce - gcePersistentDisk: - pdName: "{{ pillar['cluster_registry_disk_name'] }}" - fsType: "ext4" -{% endif %} diff --git a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 index 35c787177..0db26db96 100644 --- a/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-pvc.yml.j2 @@ -1,14 +1,16 @@ -kind: PersistentVolumeClaim +--- apiVersion: v1 +kind: PersistentVolumeClaim metadata: - name: kube-registry-pvc - namespace: kube-system + name: registry-pvc + namespace: {{ registry_namespace }} labels: kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: accessModes: - ReadWriteOnce + storageClassName: {{ registry_storage_class }} resources: requests: - storage: {{ pillar['cluster_registry_disk_size'] }} + storage: {{ registry_disk_size }} diff --git a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 similarity index 62% rename from roles/kubernetes-apps/registry/templates/registry-rc.yml.j2 rename to roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 index 90c01c4aa..730ce272b 100644 --- a/roles/kubernetes-apps/registry/templates/registry-rc.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-rs.yml.j2 @@ -1,41 +1,48 @@ --- -apiVersion: v1 -kind: ReplicationController +apiVersion: apps/v1 +kind: ReplicaSet metadata: - name: kube-registry-v{{ registry_image_tag }} - namespace: {{ system_namespace }} + name: registry-v{{ registry_image_tag }} + namespace: {{ registry_namespace }} labels: - k8s-app: kube-registry-upstream + k8s-app: registry version: v{{ registry_image_tag }} kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile spec: 
replicas: 1 selector: - k8s-app: kube-registry-upstream - version: v{{ registry_image_tag }} + matchLabels: + k8s-app: registry + version: v{{ registry_image_tag }} template: metadata: labels: - k8s-app: kube-registry-upstream + k8s-app: registry version: v{{ registry_image_tag }} kubernetes.io/cluster-service: "true" spec: containers: - name: registry image: {{ registry_image_repo }}:{{ registry_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: REGISTRY_HTTP_ADDR value: :5000 - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY value: /var/lib/registry volumeMounts: - - name: image-store + - name: registry-pvc mountPath: /var/lib/registry ports: - containerPort: 5000 name: registry protocol: TCP volumes: - - name: image-store + - name: registry-pvc +{% if registry_storage_class != none %} + persistentVolumeClaim: + claimName: registry-pvc +{% else %} emptyDir: {} +{% endif %} diff --git a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 index 566962469..58d101d29 100644 --- a/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-svc.yml.j2 @@ -2,17 +2,17 @@ apiVersion: v1 kind: Service metadata: - name: kube-registry - namespace: {{ system_namespace }} + name: registry + namespace: {{ registry_namespace }} labels: - k8s-app: kube-registry-upstream + k8s-app: registry kubernetes.io/cluster-service: "true" addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeRegistry" spec: selector: - k8s-app: kube-registry-upstream + k8s-app: registry ports: - - name: registry - port: 5000 - protocol: TCP + - name: registry + port: 5000 + protocol: TCP diff --git a/roles/kubernetes-apps/registry/templates/tls/README.md b/roles/kubernetes-apps/registry/templates/tls/README.md deleted file mode 100644 index 7ba5cc628..000000000 --- a/roles/kubernetes-apps/registry/templates/tls/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Enable TLS for Kube-Registry - -This document describes how to enable TLS for kube-registry. Before you start, please check if you have all the prerequisite: - -- A domain for kube-registry. Assuming it is ` myregistrydomain.com`. -- Domain certificate and key. Assuming they are `domain.crt` and `domain.key` - -### Pack domain.crt and domain.key into a Secret - -```console -$ kubectl --namespace=kube-system create secret generic registry-tls-secret --from-file=domain.crt=domain.crt --from-file=domain.key=domain.key -``` - -### Run Registry - -Please be noted that this sample rc is using emptyDir as storage backend for simplicity. 
- - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - - name: REGISTRY_HTTP_TLS_CERTIFICATE - value: /certs/domain.crt - - name: REGISTRY_HTTP_TLS_KEY - value: /certs/domain.key - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - - name: cert-dir - mountPath: /certs - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumes: - - name: image-store - emptyDir: {} - - name: cert-dir - secret: - secretName: registry-tls-secret -``` - - -### Expose External IP for Kube-Registry - -Modify the default kube-registry service to `LoadBalancer` type and point the DNS record of `myregistrydomain.com` to the service external ip. - - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: kube-registry - namespace: kube-system - labels: - k8s-app: kube-registry -# kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeRegistry" -spec: - selector: - k8s-app: kube-registry - type: LoadBalancer - ports: - - name: registry - port: 5000 - protocol: TCP -``` - - -### To Verify - -Now you should be able to access your kube-registry from another docker host. 
-```console -docker pull busybox -docker tag busybox myregistrydomain.com:5000/busybox -docker push myregistrydomain.com:5000/busybox -docker pull myregistrydomain.com:5000/busybox -``` - - -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/registry/tls/README.md?pixel)]() diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 deleted file mode 100644 index c2411c052..000000000 --- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-rc.yml.j2 +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - name: kube-registry-v0 - namespace: kube-system - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" -spec: - replicas: 1 - selector: - k8s-app: kube-registry - version: v0 - template: - metadata: - labels: - k8s-app: kube-registry - version: v0 -# kubernetes.io/cluster-service: "true" - spec: - containers: - - name: registry - image: registry:2 - resources: - # keep request = limit to keep this container in guaranteed class - limits: - cpu: 100m - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - env: - - name: REGISTRY_HTTP_ADDR - value: :5000 - - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY - value: /var/lib/registry - - name: REGISTRY_HTTP_TLS_CERTIFICATE - value: /certs/domain.crt - - name: REGISTRY_HTTP_TLS_KEY - value: /certs/domain.key - volumeMounts: - - name: image-store - mountPath: /var/lib/registry - - name: cert-dir - mountPath: /certs - ports: - - containerPort: 5000 - name: registry - protocol: TCP - volumes: - - name: image-store - emptyDir: {} - - name: cert-dir - secret: - secretName: registry-tls-secret - diff --git a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 b/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 deleted file mode 100644 index a9d59f117..000000000 --- a/roles/kubernetes-apps/registry/templates/tls/registry-tls-svc.yml.j2 +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kube-registry - namespace: kube-system - labels: - k8s-app: kube-registry -# kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeRegistry" -spec: - selector: - k8s-app: kube-registry - type: LoadBalancer - ports: - - name: registry - port: 5000 - protocol: TCP From b0d7115e9b95460afdc9f97f52b0e9c031b9821d Mon Sep 17 00:00:00 2001 From: Keyvan Hedayati Date: Sun, 18 Feb 2018 11:16:03 +0330 Subject: [PATCH 110/177] hswong3i/kubespray#3: Use {{ cluster_name }} for valid FQDN in REGISTRY_HOST --- .../kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 index de55d3018..84bf1cf5a 100644 --- a/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-proxy-ds.yml.j2 @@ -27,7 +27,7 @@ spec: imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: REGISTRY_HOST - value: registry.{{ registry_namespace }}.svc.cluster.local + value: registry.{{ registry_namespace }}.svc.{{ cluster_name }} - name: REGISTRY_PORT value: "5000" ports: From bb1eb9fec813a638d069283bebe83b73e70df612 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Mon, 5 Mar 2018 23:05:19 +0800 Subject: [PATCH 111/177] Add labels for namespace --- 
roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 index bb1b209e9..c224337af 100644 --- a/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 +++ b/roles/kubernetes-apps/registry/templates/registry-ns.yml.j2 @@ -3,3 +3,5 @@ apiVersion: v1 kind: Namespace metadata: name: {{ registry_namespace }} + labels: + name: {{ registry_namespace }} From 206e24448b7c27756ab83f5a85445900172a9ab2 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Tue, 13 Feb 2018 09:55:59 +0800 Subject: [PATCH 112/177] CephFS Provisioner Addon Fixup --- .../cephfs_provisioner/README.md | 78 +++++++++++++++++++ .../cephfs_provisioner/defaults/main.yml | 0 .../cephfs_provisioner/tasks/main.yml | 7 +- .../cephfs-provisioner-clusterrole.yml.j2 | 2 +- ...phfs-provisioner-clusterrolebinding.yml.j2 | 0 .../templates/cephfs-provisioner-ns.yml.j2 | 7 ++ .../templates/cephfs-provisioner-role.yml.j2 | 0 .../cephfs-provisioner-rolebinding.yml.j2 | 1 + .../templates/cephfs-provisioner-rs.yml.j2} | 21 +++-- .../templates/cephfs-provisioner-sa.yml.j2 | 0 .../templates/cephfs-provisioner-sc.yml.j2 | 0 .../cephfs-provisioner-secret.yml.j2 | 0 .../external_provisioner/meta/main.yml | 7 ++ roles/kubernetes-apps/meta/main.yml | 7 -- 14 files changed, 113 insertions(+), 17 deletions(-) create mode 100644 roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/defaults/main.yml (100%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/tasks/main.yml (86%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 (92%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 (100%) create mode 100644 roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 (100%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 (85%) rename roles/kubernetes-apps/{cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 => external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2} (52%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 (100%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 (100%) rename roles/kubernetes-apps/{ => external_provisioner}/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 (100%) diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md new file mode 100644 index 000000000..5b338a793 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/README.md @@ -0,0 +1,78 @@ +CephFS Volume Provisioner for Kubernetes 1.5+ +============================================= + +[![Docker Repository on Quay](https://quay.io/repository/external_storage/cephfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/cephfs-provisioner) + +Using Ceph volume 
client + +Development +----------- + +Compile the provisioner + +``` console +make +``` + +Make the container image and push to the registry + +``` console +make push +``` + +Test instruction +---------------- + +- Start Kubernetes local cluster + +See https://kubernetes.io/. + +- Create a Ceph admin secret + +``` bash +ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3'} |xargs echo -n > /tmp/secret +kubectl create ns cephfs +kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs +``` + +- Start CephFS provisioner + +The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity. + +``` bash +docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=cephfs-provisioner-1 +``` + +Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md). + +- Create a CephFS Storage Class + +Replace Ceph monitor's IP in example/class.yaml with your own and create storage class: + +``` bash +kubectl create -f example/class.yaml +``` + +- Create a claim + +``` bash +kubectl create -f example/claim.yaml +``` + +- Create a Pod using the claim + +``` bash +kubectl create -f example/test-pod.yaml +``` + +Known limitations +----------------- + +- Kernel CephFS doesn't work with SELinux, setting SELinux label in Pod's securityContext will not work. +- Kernel CephFS doesn't support quota or capacity, capacity requested by PVC is not enforced or validated. +- Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount. 
+ +Acknowledgement +--------------- + +Inspired by CephFS Manila provisioner and conversation with John Spray diff --git a/roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/defaults/main.yml rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml diff --git a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml similarity index 86% rename from roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml index 6e854f05e..c1fdc624c 100644 --- a/roles/kubernetes-apps/cephfs_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/tasks/main.yml @@ -3,22 +3,23 @@ - name: CephFS Provisioner | Create addon dir file: path: "{{ kube_config_dir }}/addons/cephfs_provisioner" + state: directory owner: root group: root mode: 0755 - recurse: true - name: CephFS Provisioner | Create manifests template: src: "{{ item.file }}.j2" dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" with_items: + - { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns } - { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa } - { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role } - { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding } - { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole } - { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding } - - { name: cephfs-provisioner-deploy, file: cephfs-provisioner-deploy.yml, type: deploy } + - { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs } - { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret } - { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc } register: cephfs_manifests @@ -27,7 +28,7 @@ - name: CephFS Provisioner | Apply manifests kube: name: "{{ item.item.name }}" - namespace: "{{ system_namespace }}" + namespace: "{{ cephfs_provisioner_namespace }}" kubectl: "{{ bin_dir }}/kubectl" resource: "{{ item.item.type }}" filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 similarity index 92% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 index 272db0f70..e714c3cb2 100644 --- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrole.yml.j2 @@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: cephfs-provisioner - namespace: {{ system_namespace }} + namespace: {{ cephfs_provisioner_namespace }} rules: - apiGroups: [""] resources: ["persistentvolumes"] diff --git 
a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-clusterrolebinding.yml.j2 diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 new file mode 100644 index 000000000..2a2a67cf6 --- /dev/null +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cephfs_provisioner_namespace }} + labels: + name: {{ cephfs_provisioner_namespace }} diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-role.yml.j2 diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 similarity index 85% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 index f84ed32ba..01ab87b7d 100644 --- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rolebinding.yml.j2 @@ -7,6 +7,7 @@ metadata: subjects: - kind: ServiceAccount name: cephfs-provisioner + namespace: {{ cephfs_provisioner_namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2 similarity index 52% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2 index bfe211754..976f29c05 100644 --- a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-deploy.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-rs.yml.j2 @@ -1,21 +1,28 @@ --- -apiVersion: extensions/v1beta1 -kind: Deployment +apiVersion: apps/v1 +kind: ReplicaSet metadata: - name: cephfs-provisioner + name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }} namespace: {{ cephfs_provisioner_namespace }} + labels: + k8s-app: cephfs-provisioner + version: v{{ cephfs_provisioner_image_tag }} spec: replicas: 1 - strategy: - type: Recreate + selector: + matchLabels: + k8s-app: cephfs-provisioner + version: v{{ cephfs_provisioner_image_tag }} template: metadata: labels: - app: cephfs-provisioner + 
k8s-app: cephfs-provisioner + version: v{{ cephfs_provisioner_image_tag }} spec: containers: - name: cephfs-provisioner image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} env: - name: PROVISIONER_NAME value: ceph.com/cephfs @@ -23,4 +30,6 @@ spec: - "/usr/local/bin/cephfs-provisioner" args: - "-id=cephfs-provisioner-1" +{% if rbac_enabled %} serviceAccount: cephfs-provisioner +{% endif %} diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sa.yml.j2 diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-sc.yml.j2 diff --git a/roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 similarity index 100% rename from roles/kubernetes-apps/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 rename to roles/kubernetes-apps/external_provisioner/cephfs_provisioner/templates/cephfs-provisioner-secret.yml.j2 diff --git a/roles/kubernetes-apps/external_provisioner/meta/main.yml b/roles/kubernetes-apps/external_provisioner/meta/main.yml index 3daa461d8..b520922d6 100644 --- a/roles/kubernetes-apps/external_provisioner/meta/main.yml +++ b/roles/kubernetes-apps/external_provisioner/meta/main.yml @@ -6,3 +6,10 @@ dependencies: - apps - local-volume-provisioner - external-provisioner + + - role: kubernetes-apps/external_provisioner/cephfs_provisioner + when: cephfs_provisioner_enabled + tags: + - apps + - cephfs-provisioner + - external-provisioner diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index fa3b1f1a6..fca51a3b6 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -27,13 +27,6 @@ dependencies: - apps - registry - - role: kubernetes-apps/cephfs_provisioner - when: cephfs_provisioner_enabled - tags: - - apps - - cephfs_provisioner - - storage - # istio role should be last because it takes a long time to initialize and # will cause timeouts trying to start other addons. 
- role: kubernetes-apps/istio From 60bfc56e8e7e554f5d40b1e1d1bb6a09dd9a5fb7 Mon Sep 17 00:00:00 2001 From: Erik Stidham Date: Mon, 5 Feb 2018 13:34:59 -0600 Subject: [PATCH 113/177] Update Calico and Canal - Updating to use calico-node v2.6.7 - A few updates to their manifests too --- roles/download/defaults/main.yml | 4 ++-- .../calico/templates/calico-node.yml.j2 | 8 +++++++ .../canal/templates/canal-node.yaml.j2 | 23 +++++++++++++++++-- 3 files changed, 31 insertions(+), 4 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index e97297958..02b11cf64 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -29,9 +29,9 @@ kubeadm_version: "{{ kube_version }}" etcd_version: v3.2.4 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download -calico_version: "v2.6.2" +calico_version: "v2.6.7" calico_ctl_version: "v1.6.1" -calico_cni_version: "v1.11.0" +calico_cni_version: "v1.11.2" calico_policy_version: "v1.0.0" calico_rr_version: "v0.4.0" flannel_version: "v0.10.0" diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 index 3a01648f7..3ba3e75d8 100644 --- a/roles/network_plugin/calico/templates/calico-node.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -28,6 +28,9 @@ spec: tolerations: - effect: NoSchedule operator: Exists + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 containers: # Runs calico/node container on each Kubernetes node. This # container programs network policy and routes on each @@ -53,6 +56,11 @@ spec: configMapKeyRef: name: calico-config key: cluster_type + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 index 07754c089..d63bf99b0 100644 --- a/roles/network_plugin/canal/templates/canal-node.yaml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -148,14 +148,21 @@ spec: name: canal-config key: etcd_endpoints # Disable Calico BGP. Calico is simply enforcing policy. - - name: CALICO_NETWORKING - value: "false" + - name: CALICO_NETWORKING_BACKEND + value: "none" # Cluster type to identify the deployment type - name: CLUSTER_TYPE value: "kubespray,canal" # Disable file logging so `kubectl logs` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" + # Set noderef for node controller. 
+            - name: CALICO_K8S_NODE_REF
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: FELIX_HEALTHENABLED
+              value: "true"
             # Etcd SSL vars
             - name: ETCD_CA_CERT_FILE
               valueFrom:
@@ -178,6 +185,18 @@
                   fieldPath: spec.nodeName
           securityContext:
             privileged: true
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9099
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9099
+            periodSeconds: 10
           volumeMounts:
             - mountPath: /lib/modules
               name: lib-modules

From caec3de364c008c2bd5d7ae51dd125cf613fe48f Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Sun, 4 Mar 2018 21:24:20 +0800
Subject: [PATCH 114/177] Updating to use calico-node v2.6.8

---
 roles/download/defaults/main.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 02b11cf64..28bdf1d7a 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -29,11 +29,11 @@ kubeadm_version: "{{ kube_version }}"
 etcd_version: v3.2.4
 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v2.6.7"
-calico_ctl_version: "v1.6.1"
-calico_cni_version: "v1.11.2"
-calico_policy_version: "v1.0.0"
-calico_rr_version: "v0.4.0"
+calico_version: "v2.6.8"
+calico_ctl_version: "v1.6.3"
+calico_cni_version: "v1.11.4"
+calico_policy_version: "v1.0.3"
+calico_rr_version: "v0.4.2"
 flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"

From 9fa995ac9d595cc75695fb8b977ac2dd75328c46 Mon Sep 17 00:00:00 2001
From: Dann Bohn
Date: Fri, 23 Mar 2018 08:33:25 -0400
Subject: [PATCH 115/177] only sets nodeName in kubeadm-config when
 kube_override_hostname is set

---
 roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
index dc842a5e6..b2d2cd2e7 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -83,5 +83,7 @@ apiServerCertSANs:
 {% endfor %}
 certificatesDir: {{ kube_config_dir }}/ssl
 unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+{% if kube_override_hostname|default('') %}
 nodeName: {{ inventory_hostname }}
+{% endif %}

From dd9d0c05301fe1229aeabc5e53a1bc41c1bd68a0 Mon Sep 17 00:00:00 2001
From: Anton Fayzrahmanov
Date: Fri, 23 Mar 2018 16:33:20 +0300
Subject: [PATCH 116/177] optional calico_ip_auto_method variable with
 IP_AUTODETECTION_METHOD

Can be set to one of: first-found, can-reach, or interface.
---
 roles/network_plugin/calico/templates/calico-node.yml.j2 | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 3a01648f7..45023d0cf 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -108,10 +108,15 @@ spec:
             configMapKeyRef:
               name: calico-config
               key: etcd_cert
+{% if calico_ip_auto_method is defined %}
+  - name: IP_AUTODETECTION_METHOD
+    value: "{{ calico_ip_auto_method }}"
+{% else %}
         - name: IP
           valueFrom:
             fieldRef:
               fieldPath: status.hostIP
+{% endif %}
         - name: NODENAME
           valueFrom:
             fieldRef:

From 60a057cace3c36699e19e98516a9280d2161496a Mon Sep 17 00:00:00 2001
From: Anton Fayzrahmanov
Date: Sat, 24 Mar 2018 01:46:26 +0300
Subject: [PATCH 117/177] Update calico-node.yml.j2

---
 roles/network_plugin/calico/templates/calico-node.yml.j2 | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 45023d0cf..139493b62 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -109,8 +109,8 @@ spec:
               name: calico-config
               key: etcd_cert
 {% if calico_ip_auto_method is defined %}
-  - name: IP_AUTODETECTION_METHOD
-    value: "{{ calico_ip_auto_method }}"
+        - name: IP_AUTODETECTION_METHOD
+          value: "{{ calico_ip_auto_method }}"
 {% else %}
         - name: IP
           valueFrom:

From a75598b3f44dc04a408156c226bfb36bce9cf9a4 Mon Sep 17 00:00:00 2001
From: Anton Fayzrahmanov
Date: Sat, 24 Mar 2018 01:54:17 +0300
Subject: [PATCH 118/177] IP_AUTODETECTION_METHOD docs

---
 roles/network_plugin/calico/defaults/main.yml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index a44b3d315..1b0cd0421 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -45,3 +45,9 @@ rbac_resources:
   - sa
   - clusterrole
   - clusterrolebinding
+
+# If you want to use non default IP_AUTODETECTION_METHOD for calico node set this option to one of:
+# * can-reach=DESTINATION
+# * interface=INTERFACE-REGEX
+# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
+#calico_ip_auto_method: "interface=eth.*"

From 3f5c60886bc9604c827d5deee6663edf1c87d9d8 Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Sat, 24 Mar 2018 10:52:21 +0800
Subject: [PATCH 119/177] Upgrade Weave to 2.2.1

- Fix #2414, so namespace isolation should now work
- Update weave-net.yml.j2 as per latest https://cloud.weave.works/k8s/net
- Other minor fixups
---
 README.md                                    |  2 +-
 roles/download/defaults/main.yml             |  2 +-
 roles/network_plugin/weave/defaults/main.yml |  4 +-
 .../weave/templates/weave-net.yml.j2         | 65 ++++++++++++-------
 tests/files/gce_centos-weave-kubeadm.yml     |  2 +-
 tests/files/gce_coreos-alpha-weave-ha.yml    |  2 +-
 tests/files/gce_rhel7-weave.yml              |  2 +-
 tests/files/gce_ubuntu-weave-sep.yml         |  2 +-
 8 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/README.md b/README.md
index d8cf41a4d..f19af0ae2 100644
--- a/README.md
+++ b/README.md
@@ -83,7 +83,7 @@ Versions of supported components
 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
 - [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
 - [contiv](https://github.com/contiv/install/releases) v1.1.7
-- [weave](http://weave.works/) v2.2.0
+- [weave](http://weave.works/) v2.2.1
 - [docker](https://www.docker.com/) v17.03 (see note)
 - [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index e97297958..819525d56 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -38,7 +38,7 @@ flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 istio_version: "0.2.6"
 vault_version: 0.8.1
-weave_version: 2.2.0
+weave_version: 2.2.1
 pod_infra_version: 3.0
 contiv_version: 1.1.7
 cilium_version: "v1.0.0-rc8"
diff --git a/roles/network_plugin/weave/defaults/main.yml b/roles/network_plugin/weave/defaults/main.yml
index b59f0ab63..eecb06171 100644
---
a/roles/network_plugin/weave/defaults/main.yml +++ b/roles/network_plugin/weave/defaults/main.yml @@ -1,7 +1,7 @@ --- # Limits -weave_memory_limit: 400M -weave_cpu_limit: 30m +weave_memory_limits: 400M +weave_cpu_limits: 30m weave_memory_requests: 64M weave_cpu_requests: 10m diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index b292339b5..70b70b3f8 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -9,12 +9,13 @@ items: labels: name: weave-net namespace: {{ system_namespace }} - - apiVersion: rbac.authorization.k8s.io/v1 + - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: weave-net labels: name: weave-net + namespace: {{ system_namespace }} rules: - apiGroups: - '' @@ -27,35 +28,42 @@ items: - list - watch - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - 'networking.k8s.io' + - networking.k8s.io resources: - networkpolicies verbs: - get - list - watch + - apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: ClusterRoleBinding + metadata: + name: weave-net + labels: + name: weave-net + namespace: {{ system_namespace }} + roleRef: + kind: ClusterRole + name: weave-net + apiGroup: rbac.authorization.k8s.io + subjects: + - kind: ServiceAccount + name: weave-net + namespace: {{ system_namespace }} - apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: weave-net - namespace: kube-system labels: name: weave-net + namespace: {{ system_namespace }} rules: - apiGroups: - '' - resources: - - configmaps resourceNames: - weave-net + resources: + - configmaps verbs: - get - update @@ -65,29 +73,31 @@ items: - configmaps verbs: - create - - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding + - apiVersion: rbac.authorization.k8s.io/v1beta1 + kind: RoleBinding metadata: name: weave-net labels: name: weave-net + namespace: {{ system_namespace }} roleRef: - kind: ClusterRole + kind: Role name: weave-net apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount name: weave-net - namespace: kube-system + namespace: {{ system_namespace }} - apiVersion: extensions/v1beta1 kind: DaemonSet metadata: name: weave-net labels: name: weave-net - version: {{ weave_version }} + version: v{{ weave_version }} namespace: {{ system_namespace }} spec: + minReadySeconds: 5 template: metadata: labels: @@ -122,7 +132,7 @@ items: - name: WEAVE_PASSWORD value: {{ weave_password }} image: {{ weave_kube_image_repo }}:{{ weave_kube_image_tag }} - imagePullPolicy: Always + imagePullPolicy: {{ k8s_image_pull_policy }} livenessProbe: httpGet: host: 127.0.0.1 @@ -149,19 +159,28 @@ items: mountPath: /lib/modules - name: xtables-lock mountPath: /run/xtables.lock - readOnly: false - name: weave-npc + args: [] + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName image: {{ weave_npc_image_repo }}:{{ weave_npc_image_tag }} - imagePullPolicy: Always + imagePullPolicy: {{ k8s_image_pull_policy }} resources: requests: cpu: {{ weave_cpu_requests }} memory: {{ weave_memory_requests }} limits: - cpu: {{ weave_cpu_limit }} - memory: {{ weave_memory_limit }} + cpu: {{ weave_cpu_limits }} + memory: {{ weave_memory_limits }} securityContext: privileged: true + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock hostNetwork: true hostPID: true restartPolicy: Always diff --git 
a/tests/files/gce_centos-weave-kubeadm.yml b/tests/files/gce_centos-weave-kubeadm.yml
index b4cd8e17c..a1c88e976 100644
--- a/tests/files/gce_centos-weave-kubeadm.yml
+++ b/tests/files/gce_centos-weave-kubeadm.yml
@@ -7,7 +7,7 @@ startup_script: ""
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 kubeadm_enabled: true
 deploy_netchecker: true
diff --git a/tests/files/gce_coreos-alpha-weave-ha.yml b/tests/files/gce_coreos-alpha-weave-ha.yml
index dd579c032..1666e0927 100644
--- a/tests/files/gce_coreos-alpha-weave-ha.yml
+++ b/tests/files/gce_coreos-alpha-weave-ha.yml
@@ -7,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 bootstrap_os: coreos
 resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
diff --git a/tests/files/gce_rhel7-weave.yml b/tests/files/gce_rhel7-weave.yml
index df80a556f..e6928b7a2 100644
--- a/tests/files/gce_rhel7-weave.yml
+++ b/tests/files/gce_rhel7-weave.yml
@@ -5,7 +5,7 @@ mode: default
 
 # Deployment settings
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1
diff --git a/tests/files/gce_ubuntu-weave-sep.yml b/tests/files/gce_ubuntu-weave-sep.yml
index 133bd907a..6e701cb23 100644
--- a/tests/files/gce_ubuntu-weave-sep.yml
+++ b/tests/files/gce_ubuntu-weave-sep.yml
@@ -6,7 +6,7 @@ mode: separate
 
 # Deployment settings
 bootstrap_os: ubuntu
 kube_network_plugin: weave
-weave_cpu_limit: "100m"
+weave_cpu_limits: "100m"
 weave_cpu_requests: "100m"
 deploy_netchecker: true
 kubedns_min_replicas: 1

From 1d0415a6cf5015373a35f2f50adc7749d1a014e0 Mon Sep 17 00:00:00 2001
From: Dann Bohn
Date: Sat, 24 Mar 2018 13:29:07 -0400
Subject: [PATCH 120/177] fixes typo in kube_override_hostname for kubeadm

---
 roles/kubernetes/master/templates/kubeadm-config.yaml.j2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
index b2d2cd2e7..5ea5d712c 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -84,6 +84,6 @@ apiServerCertSANs:
 certificatesDir: {{ kube_config_dir }}/ssl
 unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
 {% if kube_override_hostname|default('') %}
-nodeName: {{ inventory_hostname }}
+nodeName: {{ kube_override_hostname }}
 {% endif %}
 

From b8d1652baf6e6f2dcd4051b2b78a2f9105b77710 Mon Sep 17 00:00:00 2001
From: Michael Zehrer
Date: Sun, 25 Mar 2018 16:08:07 +0200
Subject: [PATCH 121/177] Remove kibana_base_url

The default for kibana_base_url does not make sense and makes kibana
unusable. The default path forces a 404 when you try to open kibana in
the browser. Not setting kibana_base_url works just fine.
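Deployments that really do serve Kibana behind a proxy can still set the
variable per inventory. A minimal sketch, assuming the role still honors a
user-supplied value (the override file and proxy path below are illustrative,
not values shipped by this patch):

```yaml
# inventory/group_vars/k8s-cluster.yml (hypothetical local override);
# leaving kibana_base_url unset, the new default, serves Kibana from "/".
kibana_base_url: "/api/v1/namespaces/kube-system/services/kibana-logging/proxy"
```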
---
 roles/kubernetes-apps/efk/kibana/defaults/main.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
index baf07cdf2..0651a032d 100644
--- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml
@@ -4,4 +4,3 @@ kibana_mem_limit: 0M
 kibana_cpu_requests: 100m
 kibana_mem_requests: 0M
 kibana_service_port: 5601
-kibana_base_url: "/api/v1/proxy/namespaces/kube-system/services/kibana-logging"

From 9511178666107121edca537061f785a68003b951 Mon Sep 17 00:00:00 2001
From: dvazar
Date: Mon, 26 Mar 2018 12:00:02 +0700
Subject: [PATCH 122/177] fixed: creation of an inventory template

---
 .../azurerm/roles/generate-inventory_2/templates/inventory.j2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
index 696be6d57..3e9728e71 100644
--- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
+++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2
@@ -1,6 +1,6 @@
 {% for vm in vm_ip_list %}
-{% if not use_bastion or vm.virtualMachinename == 'bastion' %}
+{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
 {% else %}
 {{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}

From 4f7479d94de81c0633d9ff4247bbb455582b35e1 Mon Sep 17 00:00:00 2001
From: Sergey Bondarev
Date: Mon, 26 Mar 2018 17:25:51 +0300
Subject: [PATCH 123/177] add etcd tuning options

https://coreos.com/etcd/docs/latest/tuning.html

Adds etcd_snapshot_count and an ionice priority for the etcd process.
---
 roles/etcd/defaults/main.yml            |  7 +++++++
 roles/etcd/templates/etcd-events.env.j2 |  3 +++
 roles/etcd/templates/etcd.env.j2        |  3 +++
 roles/etcd/templates/etcd.j2            | 15 +++++++++------
 4 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 4986ad257..5f16db1d1 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -22,6 +22,13 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"
+#etcd_snapshot_count: "10000"
+
+# Parameters for ionice
+# -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
+# -n takes an integer between 0 (highest priority) and 7 (lowest priority) +#etcd_ionice: "-c2 -n0" + etcd_metrics: "basic" # Limits diff --git a/roles/etcd/templates/etcd-events.env.j2 b/roles/etcd/templates/etcd-events.env.j2 index c168ab03e..e7dffbbfe 100644 --- a/roles/etcd/templates/etcd-events.env.j2 +++ b/roles/etcd/templates/etcd-events.env.j2 @@ -13,6 +13,9 @@ ETCD_NAME={{ etcd_member_name }}-events ETCD_PROXY=off ETCD_INITIAL_CLUSTER={{ etcd_events_peer_addresses }} ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} # TLS settings ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem diff --git a/roles/etcd/templates/etcd.env.j2 b/roles/etcd/templates/etcd.env.j2 index 6a917d127..178366d00 100644 --- a/roles/etcd/templates/etcd.env.j2 +++ b/roles/etcd/templates/etcd.env.j2 @@ -13,6 +13,9 @@ ETCD_NAME={{ etcd_member_name }} ETCD_PROXY=off ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }} ETCD_AUTO_COMPACTION_RETENTION={{ etcd_compaction_retention }} +{% if etcd_snapshot_count is defined %} +ETCD_SNAPSHOT_COUNT={{ etcd_snapshot_count }} +{% endif %} # TLS settings ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem diff --git a/roles/etcd/templates/etcd.j2 b/roles/etcd/templates/etcd.j2 index 9ac08e073..a6628d8fb 100644 --- a/roles/etcd/templates/etcd.j2 +++ b/roles/etcd/templates/etcd.j2 @@ -6,16 +6,19 @@ -v /etc/ssl/certs:/etc/ssl/certs:ro \ -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \ -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:rw \ - {% if etcd_memory_limit is defined %} +{% if etcd_memory_limit is defined %} --memory={{ etcd_memory_limit|regex_replace('Mi', 'M') }} \ - {% endif %} - {% if etcd_cpu_limit is defined %} +{% endif %} +{% if etcd_cpu_limit is defined %} --cpu-shares={{ etcd_cpu_limit|regex_replace('m', '') }} \ - {% endif %} - {% if etcd_blkio_weight is defined %} +{% endif %} +{% if etcd_blkio_weight is defined %} --blkio-weight={{ etcd_blkio_weight }} \ - {% endif %} +{% endif %} --name={{ etcd_member_name | default("etcd") }} \ {{ etcd_image_repo }}:{{ etcd_image_tag }} \ +{% if etcd_ionice is defined %} + /bin/ionice {{ etcd_ionice }} \ +{% endif %} /usr/local/bin/etcd \ "$@" From 5f5d0ffe14c0c8b5d919cb17fe6c0d22ab485a24 Mon Sep 17 00:00:00 2001 From: Erwan SEITE Date: Tue, 12 Dec 2017 11:47:04 +0100 Subject: [PATCH 124/177] replace sudo by become --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 9db4be3a1..64a3009c0 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -164,7 +164,7 @@ Vagrant.configure("2") do |config| if File.exist?(File.join(File.dirname($inventory), "hosts")) ansible.inventory_path = $inventory end - ansible.sudo = true + ansible.become = true ansible.limit = "all" ansible.host_key_checking = false ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"] From 31705a502d672b0cd917e78b2ff5573b96dd3161 Mon Sep 17 00:00:00 2001 From: Erwan SEITE Date: Wed, 20 Dec 2017 15:40:35 +0100 Subject: [PATCH 125/177] change vagrant version --- Vagrantfile | 8 +------- docs/vagrant.md | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/Vagrantfile b/Vagrantfile index 64a3009c0..536bbff2b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -3,7 +3,7 @@ require 'fileutils' -Vagrant.require_version ">= 1.9.0" +Vagrant.require_version ">= 2.0.0" CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb") @@ -135,12 +135,6 @@ Vagrant.configure("2") do |config| 
config.vm.network :private_network, ip: ip - # workaround for Vagrant 1.9.1 and centos vm - # https://github.com/hashicorp/vagrant/issues/8096 - if Vagrant::VERSION == "1.9.1" && $os == "centos" - config.vm.provision "shell", inline: "service network restart", run: "always" - end - # Disable swap for each vm config.vm.provision "shell", inline: "swapoff -a" diff --git a/docs/vagrant.md b/docs/vagrant.md index 042e8137b..de47159fa 100644 --- a/docs/vagrant.md +++ b/docs/vagrant.md @@ -1,7 +1,7 @@ Vagrant Install ================= -Assuming you have Vagrant (1.9+) installed with virtualbox (it may work +Assuming you have Vagrant (2.0+) installed with virtualbox (it may work with vmware, but is untested) you should be able to launch a 3 node Kubernetes cluster by simply running `$ vagrant up`.
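Together with the previous patch, provisioning now relies on Ansible's
"become" privilege escalation, which is why the minimum Vagrant version
moves to 2.0. A minimal sketch of the resulting provisioner block (the
playbook path is illustrative, not taken from the Vagrantfile above):

    Vagrant.require_version ">= 2.0.0"
    Vagrant.configure("2") do |config|
      config.vm.provision "ansible" do |ansible|
        ansible.playbook = "cluster.yml"
        # become replaces the deprecated ansible.sudo option
        ansible.become = true
      end
    end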
From 076b5c153ffaecbdc20f588b5ed672fa3a3690cf Mon Sep 17 00:00:00 2001 From: avoidik Date: Tue, 27 Mar 2018 11:13:36 +0300 Subject: [PATCH 126/177] Return subnet_id as defined in kubespray.tf --- contrib/terraform/openstack/modules/network/outputs.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/terraform/openstack/modules/network/outputs.tf b/contrib/terraform/openstack/modules/network/outputs.tf index a426202b9..e56a792c2 100644 --- a/contrib/terraform/openstack/modules/network/outputs.tf +++ b/contrib/terraform/openstack/modules/network/outputs.tf @@ -2,6 +2,6 @@ output "router_id" { value = "${openstack_networking_router_interface_v2.k8s.id}" } -output "network_id" { +output "subnet_id" { value = "${openstack_networking_subnet_v2.k8s.id}" } From e375678674793722eda2beb01c2988e5e54ad7d0 Mon Sep 17 00:00:00 2001 From: avoidik Date: Tue, 27 Mar 2018 11:13:52 +0300 Subject: [PATCH 127/177] Set exact user for Kubelet services --- roles/kubernetes/node/templates/kubelet.docker.service.j2 | 1 + roles/kubernetes/node/templates/kubelet.host.service.j2 | 1 + roles/kubernetes/node/templates/kubelet.rkt.service.j2 | 1 + 3 files changed, 3 insertions(+) diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2 index fdbdb8969..bba1a5fc4 100644 --- a/roles/kubernetes/node/templates/kubelet.docker.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2 @@ -5,6 +5,7 @@ After=docker.service Wants=docker.socket [Service] +User=root EnvironmentFile={{kube_config_dir}}/kubelet.env ExecStart={{ bin_dir }}/kubelet \ $KUBE_LOGTOSTDERR \ diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2 index 78ba51f70..c7dad4e29 100644 --- a/roles/kubernetes/node/templates/kubelet.host.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.host.service.j2 @@ -5,6 +5,7 @@ After=docker.service Wants=docker.socket [Service] +User=root EnvironmentFile=-{{kube_config_dir}}/kubelet.env {% if kubelet_flexvolumes_plugins_dir is defined %} ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }} diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 7e0c2f942..4286d9470 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -4,6 +4,7 @@ Documentation=https://github.com/GoogleCloudPlatform/kubernetes Wants=network.target [Service] +User=root Restart=on-failure RestartSec=10s TimeoutStartSec=0 From e6f57f27ee14756c656142c5da03da143dd8a526 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Wed, 28 Mar 2018 10:44:54 +0800 Subject: [PATCH 128/177] Fixup #2262: Update README.md for calico v2.6.8 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f19af0ae2..56210a8f9 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,7 @@ Versions of supported components - [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5 - [etcd](https://github.com/coreos/etcd/releases) v3.2.4 - [flanneld](https://github.com/coreos/flannel/releases) v0.10.0 -- [calico](https://docs.projectcalico.org/v2.5/releases/) v2.6.2 +- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8 - [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) - [cilium](https://github.com/cilium/cilium) v1.0.0-rc8 - 
[contiv](https://github.com/contiv/install/releases) v1.1.7

From 848fc323dbe1be461ef75b0b659e2c1e83374a12 Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Wed, 28 Mar 2018 11:16:42 +0800
Subject: [PATCH 129/177] Fixup for #2523:

- Rename the template for /etc/cni/net.d/00-weave.conflist to
  00-weave.conflist.j2
- Apply resource requests/limits to both the weave and weave-npc
  containers
---
 roles/network_plugin/weave/tasks/main.yml | 2 +-
 .../{weavenet.conflist.j2 => 00-weave.conflist.j2} | 0
 roles/network_plugin/weave/templates/weave-net.yml.j2 | 6 +++++-
 3 files changed, 6 insertions(+), 2 deletions(-)
 rename roles/network_plugin/weave/templates/{weavenet.conflist.j2 => 00-weave.conflist.j2} (100%)

diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml
index a8dfa0586..c2c5d82c0 100644
--- a/roles/network_plugin/weave/tasks/main.yml
+++ b/roles/network_plugin/weave/tasks/main.yml
@@ -4,7 +4,7 @@
 
 - name: template weavenet conflist
   template:
-    src: weavenet.conflist.j2
+    src: 00-weave.conflist.j2
     dest: /etc/cni/net.d/00-weave.conflist
     owner: kube
 
diff --git a/roles/network_plugin/weave/templates/weavenet.conflist.j2 b/roles/network_plugin/weave/templates/00-weave.conflist.j2
similarity index 100%
rename from roles/network_plugin/weave/templates/weavenet.conflist.j2
rename to roles/network_plugin/weave/templates/00-weave.conflist.j2
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index 70b70b3f8..699ba3128 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -141,7 +141,11 @@ items:
             initialDelaySeconds: 30
           resources:
             requests:
-              cpu: 10m
+              cpu: {{ weave_cpu_requests }}
+              memory: {{ weave_memory_requests }}
+            limits:
+              cpu: {{ weave_cpu_limits }}
+              memory: {{ weave_memory_limits }}
           securityContext:
             privileged: true
           volumeMounts:

From 72a42238849d00f1e40aee50ea11c4d628ddb272 Mon Sep 17 00:00:00 2001
From: Matthew Mosesohn
Date: Wed, 28 Mar 2018 16:26:36 +0300
Subject: [PATCH 130/177] Write cloud-config during kubelet configuration

This file should only be updated during the kubelet upgrade so that
master components are not accidentally restarted first during the
preinstall stage.
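The essence of the move is the kubelet-side task shown in the diff
below, abridged here to the pattern (paths and the handler name are
taken from the diff):

    - name: Write cloud-config
      template:
        src: "{{ cloud_provider }}-cloud-config.j2"
        dest: "{{ kube_config_dir }}/cloud_config"
      notify: restart kubelet

so the file is rendered, and the kubelet restarted, only while the node
role runs.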
--- roles/kubernetes/node/tasks/main.yml | 13 +++++++++++++ roles/kubernetes/preinstall/tasks/main.yml | 13 ------------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 4d5fa5df5..78e6d92d6 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -134,6 +134,19 @@ tags: - kube-proxy +- name: Write cloud-config + template: + src: "{{ cloud_provider }}-cloud-config.j2" + dest: "{{ kube_config_dir }}/cloud_config" + group: "{{ kube_cert_group }}" + mode: 0640 + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + notify: restart kubelet + tags: + - cloud-provider + # reload-systemd - meta: flush_handlers diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index f23040751..aca0c9606 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -256,19 +256,6 @@ tags: - bootstrap-os -- name: Write cloud-config - template: - src: "{{ cloud_provider }}-cloud-config.j2" - dest: "{{ kube_config_dir }}/cloud_config" - group: "{{ kube_cert_group }}" - mode: 0640 - when: - - inventory_hostname in groups['k8s-cluster'] - - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] - tags: - - cloud-provider - - import_tasks: etchosts.yml tags: - bootstrap-os From 0df32b03cadab6965322964b067c1e93eb2cb206 Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 28 Mar 2018 17:42:12 +0200 Subject: [PATCH 131/177] Update openssl.conf to count better and work with Jinja 2.9 --- roles/etcd/templates/openssl.conf.j2 | 21 +++++---- .../secrets/templates/openssl.conf.j2 | 44 +++++++++++-------- 2 files changed, 36 insertions(+), 29 deletions(-) diff --git a/roles/etcd/templates/openssl.conf.j2 b/roles/etcd/templates/openssl.conf.j2 index 48327f0bf..2f4f7e262 100644 --- a/roles/etcd/templates/openssl.conf.j2 +++ b/roles/etcd/templates/openssl.conf.j2 @@ -1,4 +1,4 @@ -[req] +{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] req_extensions = v3_req distinguished_name = req_distinguished_name @@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer [alt_names] DNS.1 = localhost {% for host in groups['etcd'] %} -DNS.{{ 1 + loop.index }} = {{ host }} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} {% endfor %} -{% if loadbalancer_apiserver is defined %} -{% set idx = groups['etcd'] | length | int + 2 %} -DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} {% endif %} -{% set idx = groups['etcd'] | length | int + 3 %} {% for etcd_alt_name in etcd_cert_alt_names %} -DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }} +DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }} {% endfor %} {% for host in groups['etcd'] %} -IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ 
counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} {% endfor %} -{% set idx = groups['etcd'] | length | int * 2 + 1 %} -IP.{{ idx }} = 127.0.0.1 +IP.{{ counter["ip"] }} = 127.0.0.1 diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index adc875ba6..579e2aad1 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -1,4 +1,4 @@ -[req] +{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req] req_extensions = v3_req distinguished_name = req_distinguished_name [req_distinguished_name] @@ -13,31 +13,39 @@ DNS.3 = kubernetes.default.svc DNS.4 = kubernetes.default.svc.{{ dns_domain }} DNS.5 = localhost {% for host in groups['kube-master'] %} -DNS.{{ 5 + loop.index }} = {{ host }} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} {% endfor %} -{% set idns = groups['kube-master'] | length | int + 5 %} -{% if loadbalancer_apiserver is defined %} -{% set idns = idns + 1 %} -DNS.{{ idns | string }} = {{ apiserver_loadbalancer_domain_name }} +{% for host in groups['kube-node'] %} +DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }} +{% endfor %} +{% if apiserver_loadbalancer_domain_name is defined %} +DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }} {% endif %} {% for host in groups['kube-master'] %} -IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} -{% endfor %} -{% set idx = groups['kube-master'] | length | int * 2 + 1 %} -IP.{{ idx }} = {{ kube_apiserver_ip }} -{% if loadbalancer_apiserver is defined %} -IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }} -{% set idx = idx + 1 %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% for host in groups['kube-node'] %} +{% if hostvars[host]['access_ip'] is defined %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }} +{% endif %} +IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }} +{% endfor %} +{% if kube_apiserver_ip is defined %} +IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }} +{% endif %} +{% if loadbalancer_apiserver.address is defined %} +IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }} {% endif %} -IP.{{ idx + 1 }} = 127.0.0.1 {% if supplementary_addresses_in_ssl_keys is defined %} -{% set is = idx + 1 %} {% for addr in supplementary_addresses_in_ssl_keys %} {% if addr | ipaddr %} -IP.{{ is + loop.index }} = {{ addr }} +IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }} {% else %} -DNS.{{ idns + loop.index }} = {{ addr }} +DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }} {% endif %} {% endfor %} {% endif %} +IP.{{ counter["ip"] }} = 127.0.0.1 From 
19e1b11d98c471e0c1841a327fc4cfce10182b29 Mon Sep 17 00:00:00 2001
From: Vladimir Vasilkin
Date: Wed, 28 Mar 2018 21:23:30 +0300
Subject: [PATCH 132/177] prometheus operator, metrics for k8s cluster

Install using Helm:
- Prometheus Operator
- metrics for the k8s cluster, including a Grafana dashboard,
  Alertmanager and node exporters

base project: https://github.com/coreos/prometheus-operator
the issue: https://github.com/kubernetes-incubator/kubespray/issues/2042
Previous PR, raw Ansible without Helm:
https://github.com/kubernetes-incubator/kubespray/pull/2499
---
 inventory/sample/group_vars/k8s-cluster.yml     |  8 +++++++-
 roles/kubernetes-apps/meta/main.yml             |  6 ++++++
 roles/kubernetes-apps/metrics/defaults/main.yml |  9 +++++++++
 roles/kubernetes-apps/metrics/tasks/main.yml    | 17 +++++++++++++++++
 4 files changed, 39 insertions(+), 1 deletion(-)
 create mode 100644 roles/kubernetes-apps/metrics/defaults/main.yml
 create mode 100644 roles/kubernetes-apps/metrics/tasks/main.yml

diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 5f4889e8b..3e208bdaf 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -163,9 +163,15 @@ dashboard_enabled: true
 # Monitoring apps for k8s
 efk_enabled: false
 
-# Helm deployment
+# Helm deployment. Needed for the Prometheus Operator and k8s metrics.
 helm_enabled: false
 
+# Prometheus Operator. Needed for k8s metrics. Requires Helm to be installed.
+prometheus_operator_enabled: false
+
+# K8s cluster metrics. Requires Helm and the Prometheus Operator to be installed.
+k8s_metrics_enabled: false
+
 # Istio deployment
 istio_enabled: false

diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml
index fca51a3b6..bc05e6f8c 100644
--- a/roles/kubernetes-apps/meta/main.yml
+++ b/roles/kubernetes-apps/meta/main.yml
@@ -27,6 +27,12 @@ dependencies:
       - apps
      - registry

+  - role: kubernetes-apps/metrics
+    when: prometheus_operator_enabled
+    tags:
+      - apps
+      - metrics
+
  # istio role should be last because it takes a long time to initialize and
  # will cause timeouts trying to start other addons.
  - role: kubernetes-apps/istio

diff --git a/roles/kubernetes-apps/metrics/defaults/main.yml b/roles/kubernetes-apps/metrics/defaults/main.yml
new file mode 100644
index 000000000..72018e6f5
--- /dev/null
+++ b/roles/kubernetes-apps/metrics/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Prometheus Operator. Needed for k8s metrics. Requires Helm to be installed.
+prometheus_operator_enabled: false
+
+# K8s cluster metrics. Requires Helm and the Prometheus Operator to be installed.
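+# (when enabled, the role runs: helm upgrade --install kube-prometheus coreos/kube-prometheus)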
+k8s_metrics_enabled: false + +# Separate namespace for monitoring/metrics +monitoring_namespace: "monitoring" diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml new file mode 100644 index 000000000..f64433c69 --- /dev/null +++ b/roles/kubernetes-apps/metrics/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Metrics | Make sure Helm is installed + command: "{{ bin_dir }}/helm version" + +- name: Metrics | Add coreos repo + command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/" + run_once: true + +- name: Metrics | Install Prometheus Operator + command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}" + when: prometheus_operator_enabled + run_once: true + +- name: Metrics | Install K8s cluster metrics + command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}" + when: k8s_metrics_enabled + run_once: true From 0b5404b2b7561e268e10bf96796170a4a326658c Mon Sep 17 00:00:00 2001 From: woopstar Date: Wed, 28 Mar 2018 20:28:02 +0200 Subject: [PATCH 133/177] Fix --- roles/kubernetes/secrets/templates/openssl.conf.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2 index 579e2aad1..d9720c3fe 100644 --- a/roles/kubernetes/secrets/templates/openssl.conf.j2 +++ b/roles/kubernetes/secrets/templates/openssl.conf.j2 @@ -36,7 +36,7 @@ IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansib {% if kube_apiserver_ip is defined %} IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }} {% endif %} -{% if loadbalancer_apiserver.address is defined %} +{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %} IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }} {% endif %} {% if supplementary_addresses_in_ssl_keys is defined %} From ef7f5edbb3643dd23009c35e78e6efaae77f1f08 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Wed, 28 Mar 2018 15:10:39 -0500 Subject: [PATCH 134/177] Remove old docker packages and other docker upgrade fixes (#2536) * Remove old docker packages This removes docker packages that are obsolete if docker-ce packages are to be installed, which fixes some package conflict issues that can occur during upgrades. 
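On RedHat-family hosts this means dropping the distro-provided packages
that docker-ce obsoletes; a sketch of the removal task (abridged from
the pre-upgrade task added below):

    - name: Ensure old versions of Docker are not installed
      package:
        name: '{{ item }}'
        state: absent
      with_items:
        - docker
        - docker-common
        - docker-engine
        - docker-selinux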
* Add support for setting obsoletes=0 when installing docker with yum --- roles/docker/defaults/main.yml | 4 ++++ roles/docker/tasks/main.yml | 18 ++++++++++++++++++ roles/docker/tasks/pre-upgrade.yml | 20 ++++++++++++++++++++ roles/docker/vars/redhat.yml | 2 ++ 4 files changed, 44 insertions(+) create mode 100644 roles/docker/tasks/pre-upgrade.yml diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index aa10371f5..3ed3e9ce7 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -21,6 +21,10 @@ docker_dns_servers_strict: yes docker_container_storage_setup: false +# Used to override obsoletes=0 +yum_conf: /etc/yum.conf +docker_yum_conf: /etc/yum_docker.conf + # CentOS/RedHat docker-ce repo docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable' docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg' diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 80b917114..729397b44 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -30,6 +30,8 @@ tags: - facts +- import_tasks: pre-upgrade.yml + - name: ensure docker-ce repository public key is installed action: "{{ docker_repo_key_info.pkg_key }}" args: @@ -78,11 +80,27 @@ dest: "/etc/yum.repos.d/docker.repo" when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic +- name: Copy yum.conf for editing + copy: + src: "{{ yum_conf }}" + dest: "{{ docker_yum_conf }}" + remote_src: yes + when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic + +- name: Edit copy of yum.conf to set obsoletes=0 + lineinfile: + path: "{{ docker_yum_conf }}" + state: present + regexp: '^obsoletes=' + line: 'obsoletes=0' + when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic + - name: ensure docker packages are installed action: "{{ docker_package_info.pkg_mgr }}" args: pkg: "{{item.name}}" force: "{{item.force|default(omit)}}" + conf_file: "{{item.yum_conf|default(omit)}}" state: present register: docker_task_result until: docker_task_result|succeeded diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml new file mode 100644 index 000000000..9315da305 --- /dev/null +++ b/roles/docker/tasks/pre-upgrade.yml @@ -0,0 +1,20 @@ +--- +- name: Ensure old versions of Docker are not installed. | Debian + package: + name: '{{ item }}' + state: absent + with_items: + - docker + - docker-engine + when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce')) + +- name: Ensure old versions of Docker are not installed. 
| RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
\ No newline at end of file
diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml
index 39ba211d8..cd53e284c 100644
--- a/roles/docker/vars/redhat.yml
+++ b/roles/docker/vars/redhat.yml
@@ -28,7 +28,9 @@ docker_package_info:
   pkg_mgr: yum
   pkgs:
     - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
 
 docker_repo_key_info:
   pkg_key: ''

From 9ebbf1c3cdd0f192d12a2359ba681fdf59b259b4 Mon Sep 17 00:00:00 2001
From: Kuldip Madnani
Date: Wed, 28 Mar 2018 16:24:11 -0500
Subject: [PATCH 135/177] Added a fix in the openssl.conf template to check
 whether the loadbalancer IP is available

---
 roles/kubernetes/secrets/templates/openssl.conf.j2 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/kubernetes/secrets/templates/openssl.conf.j2 b/roles/kubernetes/secrets/templates/openssl.conf.j2
index adc875ba6..b02970d1e 100644
--- a/roles/kubernetes/secrets/templates/openssl.conf.j2
+++ b/roles/kubernetes/secrets/templates/openssl.conf.j2
@@ -26,7 +26,7 @@ IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansi
 {% endfor %}
 {% set idx = groups['kube-master'] | length | int * 2 + 1 %}
 IP.{{ idx }} = {{ kube_apiserver_ip }}
-{% if loadbalancer_apiserver is defined %}
+{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %}
 IP.{{ idx + 1 }} = {{ loadbalancer_apiserver.address }}
 {% set idx = idx + 1 %}
 {% endif %}

From c8f857eae430d42057b661dcbbb86843a0a0df10 Mon Sep 17 00:00:00 2001
From: georgejdli
Date: Thu, 29 Mar 2018 09:35:28 -0500
Subject: [PATCH 136/177] configure kubespray to sign service account tokens
 with a dedicated and stable key

---
 .../templates/manifests/kube-apiserver.manifest.j2  |  2 +-
 .../manifests/kube-controller-manager.manifest.j2   |  2 +-
 roles/kubernetes/secrets/files/make-ssl.sh          | 11 +++++++++++
 roles/kubernetes/secrets/tasks/gen_certs_script.yml |  2 ++
 4 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 350a27a18..a9cd1cc60 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -63,7 +63,7 @@ spec:
 {% if kube_token_auth|default(true) %}
     - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
 {% endif %}
-    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
 {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
     - --oidc-issuer-url={{ kube_oidc_url }}
     - --oidc-client-id={{ kube_oidc_client_id }}
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 2b4282a2e..2ead625cb 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -29,7 +29,7 @@ spec:
    - 
controller-manager - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml - --leader-elect=true - - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 724c6f369..1c34fc69d 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -82,6 +82,17 @@ gen_key_and_cert() { # Admins if [ -n "$MASTERS" ]; then + + # service-account + # If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions + if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem + fi + # Generate dedicated service account signing key if one doesn't exist + if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then + openssl genrsa -out service-account-key.pem 2048 > /dev/null 2>&1 + fi + # kube-apiserver # Generate only if we don't have existing ca and apiserver certs if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 011575358..c39f606ad 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -75,6 +75,7 @@ 'kube-controller-manager-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', {% for node in groups['kube-master'] %} 'admin-{{ node }}.pem', 'admin-{{ node }}-key.pem', @@ -86,6 +87,7 @@ 'apiserver-key.pem', 'front-proxy-client.pem', 'front-proxy-client-key.pem', + 'service-account-key.pem', 'kube-scheduler.pem', 'kube-scheduler-key.pem', 'kube-controller-manager.pem', From daeeae1a91aad8d633304f5961ee33df5ed813f1 Mon Sep 17 00:00:00 2001 From: Kuldip Madnani Date: Thu, 29 Mar 2018 11:37:32 -0500 Subject: [PATCH 137/177] Added retries in pre-upgrade.yml and retries while applying kube-dns.yml (#2553) * Added retries in pre-upgrade.yml and retries while applying kube-dns.yml * Removed trailing spaces --- roles/kubernetes-apps/ansible/tasks/main.yml | 4 ++++ roles/kubernetes/master/tasks/pre-upgrade.yml | 5 ++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 55d417982..c03a78722 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -50,6 +50,10 @@ - dns_mode != 'none' - inventory_hostname == groups['kube-master'][0] - not item|skipped + register: resource_result + until: resource_result|succeeded + retries: 4 + delay: 5 tags: - dnsmasq diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 3a9fe6417..56e57b015 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -30,4 +30,7 @@ with_items: - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] when: 
kube_apiserver_manifest_replaced.changed - run_once: true + register: remove_master_container + retries: 4 + until: remove_master_container.rc == 0 + delay: 5 \ No newline at end of file From 23b3833806697c7488c1c6865e7ed4eb2e99db95 Mon Sep 17 00:00:00 2001 From: Vladimir Vasilkin Date: Thu, 29 Mar 2018 22:51:46 +0300 Subject: [PATCH 138/177] running on the first master only. --- roles/kubernetes-apps/metrics/tasks/main.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml index f64433c69..bda7d36e6 100644 --- a/roles/kubernetes-apps/metrics/tasks/main.yml +++ b/roles/kubernetes-apps/metrics/tasks/main.yml @@ -1,17 +1,25 @@ --- - name: Metrics | Make sure Helm is installed command: "{{ bin_dir }}/helm version" + when: + - inventory_hostname == groups['kube-master'][0] - name: Metrics | Add coreos repo command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/" + when: + - inventory_hostname == groups['kube-master'][0] run_once: true - name: Metrics | Install Prometheus Operator command: "{{ bin_dir }}/helm upgrade --install prometheus-operator coreos/prometheus-operator --namespace {{ monitoring_namespace }}" - when: prometheus_operator_enabled + when: + - prometheus_operator_enabled + - inventory_hostname == groups['kube-master'][0] run_once: true - name: Metrics | Install K8s cluster metrics command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}" - when: k8s_metrics_enabled + when: + - k8s_metrics_enabled + - inventory_hostname == groups['kube-master'][0] run_once: true From 760ca1c3a956fb0033510633c0ea67b8786fcb41 Mon Sep 17 00:00:00 2001 From: Vladimir Vasilkin Date: Thu, 29 Mar 2018 23:03:43 +0300 Subject: [PATCH 139/177] adding checking for prometheus_operator_enabled --- roles/kubernetes-apps/metrics/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml index bda7d36e6..ee858602b 100644 --- a/roles/kubernetes-apps/metrics/tasks/main.yml +++ b/roles/kubernetes-apps/metrics/tasks/main.yml @@ -2,11 +2,13 @@ - name: Metrics | Make sure Helm is installed command: "{{ bin_dir }}/helm version" when: + - prometheus_operator_enabled - inventory_hostname == groups['kube-master'][0] - name: Metrics | Add coreos repo command: "{{ bin_dir }}/helm repo add coreos https://s3-eu-west-1.amazonaws.com/coreos-charts/stable/" when: + - prometheus_operator_enabled - inventory_hostname == groups['kube-master'][0] run_once: true @@ -20,6 +22,7 @@ - name: Metrics | Install K8s cluster metrics command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}" when: + - prometheus_operator_enabled - k8s_metrics_enabled - inventory_hostname == groups['kube-master'][0] run_once: true From f0a04b4d65bdfe7c3c4a403eaa629c2446864c30 Mon Sep 17 00:00:00 2001 From: Vladimir Vasilkin Date: Fri, 30 Mar 2018 00:09:36 +0300 Subject: [PATCH 140/177] wait 5 * 4 secs until Tiller starts --- roles/kubernetes-apps/metrics/tasks/main.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/metrics/tasks/main.yml b/roles/kubernetes-apps/metrics/tasks/main.yml index ee858602b..e2280e98b 100644 --- a/roles/kubernetes-apps/metrics/tasks/main.yml +++ b/roles/kubernetes-apps/metrics/tasks/main.yml @@ 
-1,6 +1,10 @@
 ---
 - name: Metrics | Make sure Helm is installed
   command: "{{ bin_dir }}/helm version"
+  register: helm_ready_result
+  until: helm_ready_result|succeeded
+  retries: 4
+  delay: 5
   when:
     - prometheus_operator_enabled
     - inventory_hostname == groups['kube-master'][0]
@@ -21,7 +25,7 @@
 
 - name: Metrics | Install K8s cluster metrics
   command: "{{ bin_dir }}/helm upgrade --install kube-prometheus coreos/kube-prometheus --namespace {{ monitoring_namespace }}"
-  when: 
+  when:
    - prometheus_operator_enabled
    - k8s_metrics_enabled
    - inventory_hostname == groups['kube-master'][0]

From 4d85e3765e1c3aefdca224edf3b60e0b0e8e5ebb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=99=88=E5=AE=8F?=
Date: Fri, 30 Mar 2018 09:19:00 +0800
Subject: [PATCH 141/177] remove redundant code

---
 roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 | 1 -
 1 file changed, 1 deletion(-)

diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 7c8e0062d..57c2269a9 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -48,7 +48,6 @@ spec:
 {% elif kube_proxy_mode == 'ipvs' %}
    - --masquerade-all
    - --feature-gates=SupportIPVSProxyMode=true
-   - --proxy-mode=ipvs
    - --ipvs-min-sync-period=5s
    - --ipvs-sync-period=5s
    - --ipvs-scheduler=rr

From 4a705b3fbab6516952f1ec0775a6843994dc48e6 Mon Sep 17 00:00:00 2001
From: Chen Hong
Date: Fri, 30 Mar 2018 16:42:08 +0800
Subject: [PATCH 142/177] Vault health check may need a delay

---
 roles/vault/tasks/cluster/systemd.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/roles/vault/tasks/cluster/systemd.yml b/roles/vault/tasks/cluster/systemd.yml
index 8df52f982..f7139d336 100644
--- a/roles/vault/tasks/cluster/systemd.yml
+++ b/roles/vault/tasks/cluster/systemd.yml
@@ -55,3 +55,4 @@
     register: vault_health_check
     until: vault_health_check|succeeded
     retries: 10
+    delay: "{{ retry_stagger | random + 3 }}"

From 94a0562c93ac6e5a17fa1b6bf8caa44316d5500d Mon Sep 17 00:00:00 2001
From: Vladimir Vasilkin
Date: Fri, 30 Mar 2018 12:29:04 +0300
Subject: [PATCH 143/177] add prometheus_operator_enabled, k8s_metrics_enabled
 parameters to tests

---
 tests/files/gce_centos7-flannel-addons.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml
index 8ac8a901b..467bee2d0 100644
--- a/tests/files/gce_centos7-flannel-addons.yml
+++ b/tests/files/gce_centos7-flannel-addons.yml
@@ -16,3 +16,5 @@ deploy_netchecker: true
 kubedns_min_replicas: 1
 cloud_provider: gce
 kube_encrypt_secret_data: true
+prometheus_operator_enabled: true
+k8s_metrics_enabled: true

From 004b0a3fcf47b601bfe2bf76c2a49b8144199858 Mon Sep 17 00:00:00 2001
From: woopstar
Date: Fri, 30 Mar 2018 11:38:06 +0200
Subject: [PATCH 144/177] Fix merge conflict

---
 .../master/templates/kubeadm-config.yaml.j2 | 74 ++++++++++++++++++-
 1 file changed, 70 insertions(+), 4 deletions(-)

diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
index 393eaf99f..c2339d890 100644
--- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
+++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2
@@ -16,8 +16,11 @@ networking:
   serviceSubnet: {{ kube_service_addresses }}
   podSubnet: {{ kube_pods_subnet }}
 kubernetesVersion: {{ kube_version }}
-{% if cloud_provider is defined and cloud_provider != 
"gce" %} -cloudProvider: {{ cloud_provider }} +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} +cloud-provider: {{ cloud_provider }} +cloud-config: {{ kube_config_dir }}/cloud_config +{% elif cloud_provider is defined and cloud_provider == "aws" %} +cloud-provider: {{ cloud_provider }} {% endif %} {% if kube_proxy_mode == 'ipvs' %} kubeProxy: @@ -38,12 +41,24 @@ apiServerExtraArgs: apiserver-count: "{{ kube_apiserver_count }}" {% if kube_version | version_compare('v1.9', '>=') %} endpoint-reconciler-type: lease -{% endif %} +{% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" + profiling: "{{ kube_profiling }}" + enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" + repair-malformed-updates: "false" +{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} + anonymous-auth: "{{ kube_api_anonymous_auth }}" +{% endif %} +{% if kube_feature_gates %} + feature-gates: {{ kube_feature_gates|join(',') }} +{% endif %} {% if kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv {% endif %} +{% if kube_token_auth|default(true) %} + token-auth-file: {{ kube_token_dir }}/known_tokens.csv +{% endif %} {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: {{ kube_oidc_url }} oidc-client-id: {{ kube_oidc_client_id }} @@ -72,6 +87,23 @@ controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} node-monitor-period: {{ kube_controller_node_monitor_period }} pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }} + enable-hostpath-provisioner: "{{ kube_hostpath_dynamic_provisioner }}" + profiling: "{{ kube_profiling }}" +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + cloud-provider: {{cloud_provider}} + cloud-config: {{ kube_config_dir }}/cloud_config +{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %} + cloud-provider: {{cloud_provider}} +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} + configure-cloud-routes: "true" +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium"] %} + allocate-node-cidrs: true + cluster-cidr: {{ kube_pods_subnet }} + service-cluster-ip-range: {{ kube_service_addresses }} + node-cidr-mask-size: {{ kube_network_node_prefix }} +{% endif %} {% if kube_feature_gates %} feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} @@ -80,6 +112,13 @@ controllerManagerExtraArgs: {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: +{% if volume_cross_zone_attachment %} + policy-config-file: {{ kube_config_dir }}/kube-scheduler-policy.yaml +{% endif %} + profiling: "{{ kube_profiling }}" +{% if kube_feature_gates %} + feature-gates: {{ kube_feature_gates|join(',') }} +{% endif %} {% for key in kube_kubeadm_scheduler_extra_args %} {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} @@ -93,4 +132,31 @@ unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" {% if kube_override_hostname|default('') %} nodeName: {{ kube_override_hostname }} {% endif %} - +apiServerExtraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + - name: cloud-config + hostPath: {{ 
kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} +{% if kube_basic_auth|default(true) %} +- name: basic-auth-config + hostPath: {{ kube_users_dir }} + mountPath: {{ kube_users_dir }} +{% endif %} +{% if kube_token_auth|default(true) %} +- name: token-auth-config + hostPath: {{ kube_token_dir }} + mountPath: {{ kube_token_dir }} +{% endif %} +controllerManagerExtraVolumes: +{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} + - name: cloud-config + hostPath: {{ kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} +schedulerExtraVolumes: +{% if (cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"]) or volume_cross_zone_attachment %} + - name: cloud-config + hostPath: {{ kube_config_dir }} + mountPath: {{ kube_config_dir }} +{% endif %} From af5f376163af5f4f4bfe20efe04610c78f3e657b Mon Sep 17 00:00:00 2001 From: Andreas Kruger Date: Fri, 30 Mar 2018 11:42:20 +0200 Subject: [PATCH 145/177] Revert --- .../master/templates/kubeadm-config.yaml.j2 | 71 +------------------ 1 file changed, 2 insertions(+), 69 deletions(-) diff --git a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 index c2339d890..0eccb4918 100644 --- a/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 +++ b/roles/kubernetes/master/templates/kubeadm-config.yaml.j2 @@ -16,11 +16,8 @@ networking: serviceSubnet: {{ kube_service_addresses }} podSubnet: {{ kube_pods_subnet }} kubernetesVersion: {{ kube_version }} -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} -cloud-provider: {{ cloud_provider }} -cloud-config: {{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined and cloud_provider == "aws" %} -cloud-provider: {{ cloud_provider }} +{% if cloud_provider is defined and cloud_provider != "gce" %} +cloudProvider: {{ cloud_provider }} {% endif %} {% if kube_proxy_mode == 'ipvs' %} kubeProxy: @@ -44,21 +41,9 @@ apiServerExtraArgs: {% endif %} service-node-port-range: {{ kube_apiserver_node_port_range }} kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}" - profiling: "{{ kube_profiling }}" - enable-aggregator-routing: "{{ kube_api_aggregator_routing }}" - repair-malformed-updates: "false" -{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %} - anonymous-auth: "{{ kube_api_anonymous_auth }}" -{% endif %} -{% if kube_feature_gates %} - feature-gates: {{ kube_feature_gates|join(',') }} -{% endif %} {% if kube_basic_auth|default(true) %} basic-auth-file: {{ kube_users_dir }}/known_users.csv {% endif %} -{% if kube_token_auth|default(true) %} - token-auth-file: {{ kube_token_dir }}/known_tokens.csv -{% endif %} {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %} oidc-issuer-url: {{ kube_oidc_url }} oidc-client-id: {{ kube_oidc_client_id }} @@ -87,23 +72,6 @@ controllerManagerExtraArgs: node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }} node-monitor-period: {{ kube_controller_node_monitor_period }} pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }} - enable-hostpath-provisioner: "{{ kube_hostpath_dynamic_provisioner }}" - profiling: "{{ kube_profiling }}" -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - cloud-provider: {{cloud_provider}} - cloud-config: {{ kube_config_dir }}/cloud_config -{% elif cloud_provider is defined 
and cloud_provider in ["aws", "external"] %} - cloud-provider: {{cloud_provider}} -{% endif %} -{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} - configure-cloud-routes: "true" -{% endif %} -{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium"] %} - allocate-node-cidrs: true - cluster-cidr: {{ kube_pods_subnet }} - service-cluster-ip-range: {{ kube_service_addresses }} - node-cidr-mask-size: {{ kube_network_node_prefix }} -{% endif %} {% if kube_feature_gates %} feature-gates: {{ kube_feature_gates|join(',') }} {% endif %} @@ -112,13 +80,6 @@ controllerManagerExtraArgs: {% endfor %} {% if kube_kubeadm_scheduler_extra_args|length > 0 %} schedulerExtraArgs: -{% if volume_cross_zone_attachment %} - policy-config-file: {{ kube_config_dir }}/kube-scheduler-policy.yaml -{% endif %} - profiling: "{{ kube_profiling }}" -{% if kube_feature_gates %} - feature-gates: {{ kube_feature_gates|join(',') }} -{% endif %} {% for key in kube_kubeadm_scheduler_extra_args %} {{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}" {% endfor %} @@ -132,31 +93,3 @@ unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}" {% if kube_override_hostname|default('') %} nodeName: {{ kube_override_hostname }} {% endif %} -apiServerExtraVolumes: -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} -{% if kube_basic_auth|default(true) %} -- name: basic-auth-config - hostPath: {{ kube_users_dir }} - mountPath: {{ kube_users_dir }} -{% endif %} -{% if kube_token_auth|default(true) %} -- name: token-auth-config - hostPath: {{ kube_token_dir }} - mountPath: {{ kube_token_dir }} -{% endif %} -controllerManagerExtraVolumes: -{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} -schedulerExtraVolumes: -{% if (cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"]) or volume_cross_zone_attachment %} - - name: cloud-config - hostPath: {{ kube_config_dir }} - mountPath: {{ kube_config_dir }} -{% endif %} From 03bcfa7ff544991da1a39af34a37ef644d27af91 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 30 Mar 2018 14:29:13 +0300 Subject: [PATCH 146/177] Stop templating kube-system namespace and creating it (#2545) Kubernetes makes this namespace automatically, so there is no need for kubespray to manage it. 
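The mechanical change is the same across all of the manifests touched
below; a representative hunk:

    -  namespace: {{ system_namespace }}
    +  namespace: kube-system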
--- inventory/sample/group_vars/k8s-cluster.yml | 1 - roles/dnsmasq/tasks/main.yml | 2 +- .../templates/dnsmasq-clusterrolebinding.yml | 4 +-- roles/dnsmasq/templates/dnsmasq-deploy.yml | 2 +- .../templates/dnsmasq-serviceaccount.yml | 2 +- roles/dnsmasq/templates/dnsmasq-svc.yml | 2 +- roles/etcd/defaults/main.yml | 6 ++-- .../ansible/tasks/cleanup_dns.yml | 8 ++--- .../ansible/tasks/dashboard.yml | 2 +- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- .../coredns-clusterrolebinding.yml.j2 | 2 +- .../ansible/templates/coredns-config.yml.j2 | 2 +- .../templates/coredns-deployment.yml.j2 | 2 +- .../ansible/templates/coredns-sa.yml.j2 | 2 +- .../ansible/templates/coredns-svc.yml.j2 | 2 +- .../ansible/templates/dashboard.yml.j2 | 16 +++++----- .../kubedns-autoscaler-clusterrole.yml.j2 | 2 +- ...bedns-autoscaler-clusterrolebinding.yml.j2 | 4 +-- .../templates/kubedns-autoscaler-sa.yml.j2 | 2 +- .../templates/kubedns-autoscaler.yml.j2 | 4 +-- .../ansible/templates/kubedns-deploy.yml.j2 | 2 +- .../ansible/templates/kubedns-sa.yml.j2 | 2 +- .../ansible/templates/kubedns-svc.yml.j2 | 2 +- .../cluster_roles/tasks/main.yml | 29 ------------------- .../cluster_roles/templates/namespace.j2 | 2 +- .../efk/elasticsearch/tasks/main.yml | 6 ++-- .../templates/efk-clusterrolebinding.yml | 4 +-- .../efk/elasticsearch/templates/efk-sa.yml | 2 +- .../templates/elasticsearch-deployment.yml.j2 | 2 +- .../templates/elasticsearch-service.yml.j2 | 2 +- .../efk/fluentd/tasks/main.yml | 2 +- .../fluentd/templates/fluentd-config.yml.j2 | 2 +- .../efk/fluentd/templates/fluentd-ds.yml.j2 | 2 +- .../kubernetes-apps/efk/kibana/tasks/main.yml | 4 +-- .../kibana/templates/kibana-deployment.yml.j2 | 2 +- .../kibana/templates/kibana-service.yml.j2 | 2 +- .../cephfs_provisioner/defaults/main.yml | 2 +- .../defaults/main.yml | 2 +- roles/kubernetes-apps/helm/tasks/main.yml | 4 +-- .../templates/tiller-clusterrolebinding.yml | 4 +-- .../helm/templates/tiller-sa.yml | 2 +- .../network_plugin/calico/tasks/main.yml | 2 +- .../network_plugin/canal/tasks/main.yml | 2 +- .../network_plugin/cilium/tasks/main.yml | 4 +-- .../network_plugin/contiv/tasks/main.yml | 2 +- .../network_plugin/flannel/tasks/main.yml | 2 +- .../network_plugin/weave/tasks/main.yml | 2 +- .../policy_controller/calico/tasks/main.yml | 4 +-- .../templates/calico-kube-controllers.yml.j2 | 4 +-- .../calico/templates/calico-kube-cr.yml.j2 | 2 +- .../calico/templates/calico-kube-crb.yml.j2 | 2 +- .../calico/templates/calico-kube-sa.yml.j2 | 2 +- .../registry/defaults/main.yml | 2 +- .../rotate_tokens/tasks/main.yml | 2 +- .../manifests/kube-apiserver.manifest.j2 | 2 +- .../kube-controller-manager.manifest.j2 | 2 +- .../manifests/kube-scheduler.manifest.j2 | 2 +- roles/kubernetes/master/vars/main.yml | 6 ---- .../manifests/kube-proxy.manifest.j2 | 2 +- .../manifests/nginx-proxy.manifest.j2 | 2 +- roles/kubespray-defaults/defaults/main.yaml | 1 - .../calico/templates/calico-config.yml.j2 | 2 +- .../calico/templates/calico-cr.yml.j2 | 2 +- .../calico/templates/calico-crb.yml.j2 | 2 +- .../calico/templates/calico-node-sa.yml.j2 | 2 +- .../calico/templates/calico-node.yml.j2 | 2 +- .../canal/templates/canal-cr-calico.yml.j2 | 2 +- .../canal/templates/canal-crb-calico.yml.j2 | 2 +- .../canal/templates/canal-crb-flannel.yml.j2 | 2 +- .../canal/templates/canal-node-sa.yml.j2 | 2 +- .../canal/templates/canal-node.yaml.j2 | 2 +- .../cilium/templates/cilium-config.yml.j2 | 2 +- .../cilium/templates/cilium-crb.yml.j2 | 2 +- .../cilium/templates/cilium-ds.yml.j2 | 2 +- 
.../cilium/templates/cilium-sa.yml.j2 | 2 +- .../contiv/templates/contiv-api-proxy.yml.j2 | 4 +-- .../contiv/templates/contiv-config.yml.j2 | 2 +- .../contiv/templates/contiv-etcd-proxy.yml.j2 | 2 +- .../contiv/templates/contiv-etcd.yml.j2 | 2 +- .../contiv-netmaster-clusterrole.yml.j2 | 2 +- ...contiv-netmaster-clusterrolebinding.yml.j2 | 2 +- .../contiv-netmaster-serviceaccount.yml.j2 | 2 +- .../contiv/templates/contiv-netmaster.yml.j2 | 4 +-- .../contiv-netplugin-clusterrole.yml.j2 | 2 +- ...contiv-netplugin-clusterrolebinding.yml.j2 | 2 +- .../contiv-netplugin-serviceaccount.yml.j2 | 2 +- .../contiv/templates/contiv-netplugin.yml.j2 | 2 +- .../flannel/templates/cni-flannel-rbac.yml.j2 | 4 +-- .../flannel/templates/cni-flannel.yml.j2 | 4 +-- .../weave/templates/weave-net.yml.j2 | 16 +++++----- roles/vault/defaults/main.yml | 2 +- 91 files changed, 122 insertions(+), 159 deletions(-) delete mode 100644 roles/kubernetes/master/vars/main.yml diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 5f4889e8b..694368954 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -6,7 +6,6 @@ kube_config_dir: /etc/kubernetes kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_manifest_dir: "{{ kube_config_dir }}/manifests" -system_namespace: kube-system # This is where all the cert scripts and certs will be located kube_cert_dir: "{{ kube_config_dir }}/ssl" diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index b6574fd27..831330175 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -91,7 +91,7 @@ - name: Start Resources kube: name: "{{item.item.name}}" - namespace: "{{system_namespace}}" + namespace: "kube-system" kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml index 817de877b..0fa300989 100644 --- a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml +++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml @@ -3,11 +3,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: dnsmasq - namespace: "{{ system_namespace }}" + namespace: "kube-system" subjects: - kind: ServiceAccount name: dnsmasq - namespace: "{{ system_namespace}}" + namespace: "kube-system" roleRef: kind: ClusterRole name: cluster-admin diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index 838471050..0fb6045e8 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: dnsmasq - namespace: "{{system_namespace}}" + namespace: "kube-system" labels: k8s-app: dnsmasq kubernetes.io/cluster-service: "true" diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml index bce8a232f..91e98feee 100644 --- a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml +++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml @@ -3,6 +3,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: dnsmasq - namespace: "{{ system_namespace }}" + namespace: "kube-system" labels: kubernetes.io/cluster-service: "true" diff --git a/roles/dnsmasq/templates/dnsmasq-svc.yml b/roles/dnsmasq/templates/dnsmasq-svc.yml index 54dc0aa97..f00d3d3dd 
100644
--- a/roles/dnsmasq/templates/dnsmasq-svc.yml
+++ b/roles/dnsmasq/templates/dnsmasq-svc.yml
@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml
index 5f16db1d1..1268c13c7 100644
--- a/roles/etcd/defaults/main.yml
+++ b/roles/etcd/defaults/main.yml
@@ -12,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"

 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
index 5f8356cf9..e77f1e799 100644
--- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
+++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Delete old CoreDNS resources
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -16,7 +16,7 @@
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: absent
@@ -28,7 +28,7 @@
 - name: Kubernetes Apps | Delete old KubeDNS resources
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -41,7 +41,7 @@
 - name: Kubernetes Apps | Delete kubeadm KubeDNS
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
index ce56bd5d1..4c9ad5c74 100644
--- a/roles/kubernetes-apps/ansible/tasks/dashboard.yml
+++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml
@@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml
index c03a78722..ceb667f69 100644
--- a/roles/kubernetes-apps/ansible/tasks/main.yml
+++ b/roles/kubernetes-apps/ansible/tasks/main.yml
@@ -37,7 +37,7 @@
 - name: Kubernetes Apps | Start Resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
index 6c49d047f..89becd5b4 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-clusterrolebinding.yml.j2
@@ -15,4 +15,4 @@ roleRef:
 subjects:
   - kind: ServiceAccount
     name: coredns
-    namespace: {{ system_namespace }}
+    namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
index 983d2579f..360480c1e 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 data:
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
index 30128d566..5cba6f1f0 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-deployment.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
index db5682354..64d9c4dae 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-sa.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
diff --git a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
index c5b76b0b5..193de10eb 100644
--- a/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/coredns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
index b1ba1481d..5f0a40cb3 100644
--- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2
@@ -25,7 +25,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard-certs
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 type: Opaque

 ---
@@ -37,7 +37,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system

 ---
 # ------------------- Dashboard Role & Role Binding ------------------- #

 kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
 - apiGroups: [""]
@@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -89,7 +89,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system

 ---
 # ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
@@ -103,7 +103,7 @@ rules:
   resources: ["services/proxy"]
   resourceNames: ["https:kubernetes-dashboard:"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]

 ---
@@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10
@@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
     - port: 443
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
index f80d3d90c..e29ed4dac 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml.j2
@@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
index eb76f2d4e..3b11c6b9f 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml.j2
@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
index 542ae86ce..4c440f653 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml.j2
@@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
index df92ee615..d7c30eceb 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2
@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"
@@ -40,7 +40,7 @@ spec:
             memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace={{ system_namespace }}
+          - --namespace=kube-system
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
           - --target=Deployment/kube-dns
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
index 682bdf491..cfce65f0e 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
index f399fd6f4..296a3a938 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
index 1c4710db1..6bc5f9240 100644
--- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
+++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index c576586a2..fefa7caeb 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -126,32 +126,3 @@
     - kube_version | version_compare('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
-
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
diff --git a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2 b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
index 9bdf201a2..f2e115a6a 100644
--- a/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
+++ b/roles/kubernetes-apps/cluster_roles/templates/namespace.j2
@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
index 8abbe2317..b6055132b 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml
@@ -10,7 +10,7 @@
   when: rbac_enabled

 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"
@@ -24,7 +24,7 @@
   register: es_deployment_manifest

 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed

@@ -35,6 +35,6 @@
   register: es_service_manifest

 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
index a5aba61ae..dd5b9b630 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
index e79e26be8..75d75f650 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
index 6d5382e09..ee2eb8b21 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2 b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
index b7558f9d9..789ecb215 100644
--- a/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
+++ b/roles/kubernetes-apps/efk/elasticsearch/templates/elasticsearch-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
index c91bf6827..f444c79b6 100644
--- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml
@@ -17,6 +17,6 @@
   register: fluentd_ds_manifest

 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
index 8a8ebbcec..b7de44dc0 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-config.yml.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used
diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
index 960a79e89..f23a8851c 100644
--- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
+++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
index ea8568286..424b313b8 100644
--- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml
+++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml
@@ -10,7 +10,7 @@
     filename: "{{kube_config_dir}}/kibana-deployment.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "deployment"
     state: "latest"
   with_items: "{{ kibana_deployment_manifest.changed }}"
@@ -27,7 +27,7 @@
     filename: "{{kube_config_dir}}/kibana-service.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "svc"
     state: "latest"
   with_items: "{{ kibana_service_manifest.changed }}"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
index c48413bd0..4fdf54c04 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-deployment.yml.j2
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2 b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
index 241b896f0..5cff3c628 100644
--- a/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
+++ b/roles/kubernetes-apps/efk/kibana/templates/kibana-service.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
index 9a3bca1ef..3b80ecbb2 100644
--- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml
@@ -2,7 +2,7 @@
 cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner
 cephfs_provisioner_image_tag: 92295a30

-cephfs_provisioner_namespace: "{{ system_namespace }}"
+cephfs_provisioner_namespace: "kube-system"
 cephfs_provisioner_cluster: ceph
 cephfs_provisioner_monitors: []
 cephfs_provisioner_admin_id: admin
diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
index dd2e8a147..ea5dcb079 100644
--- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
+++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml
@@ -2,7 +2,7 @@
 local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
 local_volume_provisioner_image_tag: v2.0.0

-local_volume_provisioner_namespace: "{{ system_namespace }}"
+local_volume_provisioner_namespace: "kube-system"
 local_volume_provisioner_base_dir: /mnt/disks
 local_volume_provisioner_mount_dir: /mnt/disks
 local_volume_provisioner_storage_class: local-storage
diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml
index 06e97aff2..e7b387944 100644
--- a/roles/kubernetes-apps/helm/tasks/main.yml
+++ b/roles/kubernetes-apps/helm/tasks/main.yml
@@ -18,7 +18,7 @@
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -28,7 +28,7 @@
 - name: Helm | Install/upgrade helm
   command: >
-    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
+    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
     {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
index 0c8db4c78..00694181e 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
+++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: tiller
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
index 26e575fb6..606dbb147 100644
--- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml
+++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
index f17e45c7a..4c8295c1e 100644
--- a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Start Calico resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
index cbe4f0ac7..3640fe762 100644
--- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Canal | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
index 2359fe2d4..5d90bdb01 100755
--- a/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/cilium/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Cilium | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube-master'][0] and not item|skipped

 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
+  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
index 330acc1cd..5289296dc 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -3,7 +3,7 @@
 - name: Contiv | Create Kubernetes resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
index 09603a794..bdf954bf9 100644
--- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml
@@ -2,7 +2,7 @@
 - name: Flannel | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
index 66d900d55..53ad953b5 100644
--- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml
@@ -5,7 +5,7 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/weave-net.yml"
     resource: "ds"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     state: "latest"
   when: inventory_hostname == groups['kube-master'][0]
diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
index ba1162799..62e929f41 100644
--- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
+++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml
@@ -12,7 +12,7 @@
     name: calico-policy-controller
     kubectl: "{{bin_dir}}/kubectl"
     resource: rs
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     state: absent
   run_once: true

@@ -32,7 +32,7 @@
 - name: Start of Calico kube controllers
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
index 7e1311b92..d7083e3e6 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2
@@ -2,7 +2,7 @@ apiVersion: apps/v1beta2
 kind: Deployment
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: calico-kube-controllers
     kubernetes.io/cluster-service: "true"
@@ -15,7 +15,7 @@ spec:
   template:
     metadata:
       name: calico-kube-controllers
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-kube-controllers
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
index 82c2f3e44..d05e986a4 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
index 38853a413..2e5118481 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
index bf8958976..e42e89d18 100644
--- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
+++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml
index 93d1cfa2a..a626435d5 100644
--- a/roles/kubernetes-apps/registry/defaults/main.yml
+++ b/roles/kubernetes-apps/registry/defaults/main.yml
@@ -4,6 +4,6 @@ registry_image_tag: 2.6
 registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
 registry_proxy_image_tag: 0.4

-registry_namespace: "{{ system_namespace }}"
+registry_namespace: "kube-system"
 registry_storage_class: ""
 registry_disk_size: "10Gi"
diff --git a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
index 52101ae16..3884a3a65 100644
--- a/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
+++ b/roles/kubernetes-apps/rotate_tokens/tasks/main.yml
@@ -44,5 +44,5 @@
   when: needs_rotation

 - name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all"
+  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
   when: needs_rotation
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 350a27a18..0a4e3e661 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-apiserver
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-apiserver
     kubespray: v2
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 2b4282a2e..99eef9b39 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-controller-manager
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-controller-manager
   annotations:
diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
index b13fc7fa3..a4023365e 100644
--- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-scheduler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-scheduler
   annotations:
diff --git a/roles/kubernetes/master/vars/main.yml b/roles/kubernetes/master/vars/main.yml
deleted file mode 100644
index a5eba4f2b..000000000
--- a/roles/kubernetes/master/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-namespace_kubesystem:
-  apiVersion: v1
-  kind: Namespace
-  metadata:
-    name: "{{system_namespace}}"
diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
index 57c2269a9..18e51069f 100644
--- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-proxy
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-proxy
   annotations:
diff --git a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
index 2d566cad1..a1e9a7815 100644
--- a/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
+++ b/roles/kubernetes/node/templates/manifests/nginx-proxy.manifest.j2
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: nginx-proxy
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-nginx
 spec:
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 4828de6af..f2c5dcd04 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -61,7 +61,6 @@ dns_domain: "{{ cluster_name }}"
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system

 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2
index 92d2f1f0a..3be65deaa 100644
--- a/roles/network_plugin/calico/templates/calico-config.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-config.yml.j2
@@ -2,7 +2,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: calico-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   etcd_endpoints: "{{ etcd_access_addresses }}"
   etcd_ca: "/calico-secrets/ca_cert.crt"
diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2
index 47d626659..cef8331f3 100644
--- a/roles/network_plugin/calico/templates/calico-cr.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources:
diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2
index 2e132a0dc..1b4e8fe00 100644
--- a/roles/network_plugin/calico/templates/calico-crb.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2
@@ -10,4 +10,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
index 5cce29793..68b1c286f 100644
--- a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2
index 6ec3cd20b..849ea0afb 100644
--- a/roles/network_plugin/calico/templates/calico-node.yml.j2
+++ b/roles/network_plugin/calico/templates/calico-node.yml.j2
@@ -6,7 +6,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: calico-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: calico-node
 spec:
diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
index e3b048c64..2e92b7b2b 100644
--- a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2
@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources:
diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
index e1c1f5050..016e5193e 100644
--- a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2
@@ -11,4 +11,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
index 3b00017b1..097b1538e 100644
--- a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2
@@ -11,4 +11,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
index d5b9a6e97..aa168d15c 100644
--- a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
+++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: canal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/canal/templates/canal-node.yaml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2
index d63bf99b0..8535360a1 100644
--- a/roles/network_plugin/canal/templates/canal-node.yaml.j2
+++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: canal-node
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: canal-node
 spec:
diff --git a/roles/network_plugin/cilium/templates/cilium-config.yml.j2 b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
index a96bb8531..c5051e2ca 100755
--- a/roles/network_plugin/cilium/templates/cilium-config.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-config.yml.j2
@@ -2,7 +2,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: cilium-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   # This etcd-config contains the etcd endpoints of your cluster. If you use
   # TLS please make sure you uncomment the ca-file line and add the respective
diff --git a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2 b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
index dcfe4d471..04d603d57 100755
--- a/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-crb.yml.j2
@@ -10,6 +10,6 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 - kind: Group
   name: system:nodes
diff --git a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2 b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
index 3d877a5cb..8eaa24f32 100755
--- a/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-ds.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   template:
     metadata:
diff --git a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2 b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
index d6ef2a431..c03ac59b4 100755
--- a/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
+++ b/roles/network_plugin/cilium/templates/cilium-sa.yml.j2
@@ -3,4 +3,4 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: cilium
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index 140379b13..3ccaffaf8 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: contiv-api-proxy
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-api-proxy
 spec:
@@ -12,7 +12,7 @@ spec:
   template:
     metadata:
       name: contiv-api-proxy
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         k8s-app: contiv-api-proxy
       annotations:
diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
index 0505cd1f1..249d9d88e 100644
--- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -5,7 +5,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: contiv-config
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 data:
   # The location of your cluster store. This is set to the
   # avdertise-client value below from the contiv-etcd service.
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index a9690cc2f..75946d821 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-etcd-proxy
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-etcd-proxy
 spec:
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
index 8060f4c01..a6e9121d4 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-etcd
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-etcd
 spec:
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
index 82ca00437..6ccd4f9b4 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
index 74c5e3145..73d636775 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
index 0c1bfb3e5..758ea4493 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 56be2d93d..d41259ec1 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -3,7 +3,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-netmaster
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-netmaster
 spec:
@@ -12,7 +12,7 @@ spec:
   template:
     metadata:
       name: contiv-netmaster
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         k8s-app: contiv-netmaster
       annotations:
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
index c26e094ed..af4c6e584 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrole.yml.j2
@@ -2,7 +2,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
index 0c989008a..6cac217fc 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-clusterrolebinding.yml.j2
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
index edfac8bb3..8d00ec8cb 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin-serviceaccount.yml.j2
@@ -2,6 +2,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index 9c2c0a036..2a7bf71cb 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -5,7 +5,7 @@ kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
   name: contiv-netplugin
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: contiv-netplugin
 spec:
diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
index aafe2a0f5..6f5c9a211 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: flannel
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -41,4 +41,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: flannel
-  namespace: "{{system_namespace}}"
\ No newline at end of file
+  namespace: "kube-system"
\ No newline at end of file
diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
index bb2a6a7f8..7ecb21ad0 100644
--- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
+++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2
@@ -3,7 +3,7 @@ kind: ConfigMap
 apiVersion: v1
 metadata:
   name: kube-flannel-cfg
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     tier: node
     app: flannel
@@ -41,7 +41,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: kube-flannel
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     tier: node
     k8s-app: flannel
diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2
index 699ba3128..9a7da7377 100644
--- a/roles/network_plugin/weave/templates/weave-net.yml.j2
+++ b/roles/network_plugin/weave/templates/weave-net.yml.j2
@@ -8,14 +8,14 @@ items:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
+      namespace: kube-system
   - apiVersion: rbac.authorization.k8s.io/v1beta1
     kind: ClusterRole
     metadata:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
+      namespace: kube-system
     rules:
       - apiGroups:
           - ''
@@ -41,7 +41,7 @@ items:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
+      namespace: kube-system
     roleRef:
       kind: ClusterRole
       name: weave-net
@@ -49,14 +49,14 @@ items:
     subjects:
       - kind: ServiceAccount
         name: weave-net
-        namespace: {{ system_namespace }}
+        namespace: kube-system
  - apiVersion: rbac.authorization.k8s.io/v1beta1
     kind: Role
     metadata:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
+      namespace: kube-system
     rules:
       - apiGroups:
           - ''
@@ -79,7 +79,7 @@ items:
       name: weave-net
       labels:
         name: weave-net
-      namespace: {{ system_namespace }}
+      namespace: kube-system
     roleRef:
       kind: Role
       name: weave-net
@@ -87,7 +87,7 @@ items:
     subjects:
       - kind: ServiceAccount
         name: weave-net
-        namespace: {{ system_namespace }}
+        namespace: kube-system
   - apiVersion: extensions/v1beta1
     kind: DaemonSet
     metadata:
@@ -95,7 +95,7 @@ items:
       labels:
         name: weave-net
         version: v{{ weave_version }}
-      namespace: {{ system_namespace }}
+      namespace: kube-system
     spec:
       minReadySeconds: 5
       template:
diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml
index 9a3e83035..8e5ad08a0 100644
--- a/roles/vault/defaults/main.yml
+++ b/roles/vault/defaults/main.yml
@@ -86,7 +86,7 @@ vault_ca_options:
     format: pem
     ttl: "{{ vault_max_lease_ttl }}"
     exclude_cn_from_sans: true
-    alt_names: "vault.{{ system_namespace }}.svc.{{ dns_domain }},vault.{{ system_namespace }}.svc,vault.{{ system_namespace }},vault"
+    alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault"
   etcd:
     common_name: etcd
     format: pem

From 13c57147ebc8e0ae123ca0a979a12b9566aaaf1d Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Fri, 30 Mar 2018 09:48:55 -0400
Subject: [PATCH 147/177] only set no_proxy if other proxy vars are defined

---
 roles/kubespray-defaults/defaults/main.yaml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index 4828de6af..47ff298fe 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -241,6 +241,7 @@ weave_peers: uninitialized
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
+  {%- if http_proxy is defined or https_proxy is defined %}
   {%- if loadbalancer_apiserver is defined -%}
   {{ apiserver_loadbalancer_domain_name| default('') }},
   {{ loadbalancer_apiserver.address | default('') }},
@@ -254,11 +255,12 @@
   {{ item }},{{ item }}.{{ dns_domain }},
   {%- endfor -%}
   127.0.0.1,localhost
+  {%- endif %}

 proxy_env:
   http_proxy: "{{ http_proxy| default ('') }}"
   https_proxy: "{{ https_proxy| default ('') }}"
-  no_proxy: "{{ no_proxy }}"
+  no_proxy: "{{ no_proxy| default ('') }}"

 # Vars for pointing to kubernetes api endpoints
 is_kube_master: "{{ inventory_hostname in groups['kube-master'] }}"
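With the guard above, the whole `no_proxy` block renders to an empty string unless a proxy is actually configured. A simplified sketch of the pattern (list contents elided):

```
{# sketch: the block yields '' when neither proxy var is set #}
no_proxy: >-
  {%- if http_proxy is defined or https_proxy is defined %}
  ...cluster IPs and hostnames...,127.0.0.1,localhost
  {%- endif %}
```

That is also why `proxy_env` now reads `no_proxy| default ('')` instead of assuming the variable always carries a value.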
From 72c2a8982b01c09796214fc29866c4fccf281170 Mon Sep 17 00:00:00 2001
From: avoidik
Date: Fri, 30 Mar 2018 17:24:50 +0300
Subject: [PATCH 148/177] Fix kubecert_node.results indexes

---
 roles/kubernetes/secrets/tasks/check-certs.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 627889771..4780b14d6 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -105,9 +105,9 @@
       {%- set certs = {'sync': False} -%}
       {% if gen_node_certs[inventory_hostname] or
         (not kubecert_node.results[0].stat.exists|default(False)) or
-          (not kubecert_node.results[10].stat.exists|default(False)) or
-          (not kubecert_node.results[7].stat.exists|default(False)) or
-          (kubecert_node.results[10].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[10].stat.path)|map(attribute="checksum")|first|default('')) -%}
+          (not kubecert_node.results[12].stat.exists|default(False)) or
+          (not kubecert_node.results[8].stat.exists|default(False)) or
+          (kubecert_node.results[12].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[12].stat.path)|map(attribute="checksum")|first|default('')) -%}
       {%- set _ = certs.update({'sync': True}) -%}
       {% endif %}
       {{ certs.sync }}

From e296ccb4d00db2de9bfeef3b7da91482e5e62e28 Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Fri, 30 Mar 2018 12:31:38 -0400
Subject: [PATCH 149/177] include do extension for jinja

---
 ansible.cfg | 1 +
 1 file changed, 1 insertion(+)

diff --git a/ansible.cfg b/ansible.cfg
index d3102a6f4..6f381690e 100644
--- a/ansible.cfg
+++ b/ansible.cfg
@@ -13,3 +13,4 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
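The `do` extension enables Jinja2's expression-statement tag, which lets a template call a method purely for its side effect without emitting output; PATCH 152 below relies on it for `role_node_labels`. A minimal illustration:

```
{# requires jinja2_extensions = jinja2.ext.do in ansible.cfg #}
{% set role_node_labels = [] %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
{{ role_node_labels | join(',') }}
```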
From 572ab650dbd2f091e6d8a7c6321b1f04f1825222 Mon Sep 17 00:00:00 2001
From: georgejdli
Date: Fri, 30 Mar 2018 13:00:01 -0500
Subject: [PATCH 150/177] copy dedicated service account token signing key for
 kubeadm migration

---
 roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
index a9f938318..58eaaa66f 100644
--- a/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
+++ b/roles/kubernetes/master/tasks/kubeadm-migrate-certs.yml
@@ -9,4 +9,6 @@
     - {src: apiserver-key.pem, dest: apiserver.key}
     - {src: ca.pem, dest: ca.crt}
     - {src: ca-key.pem, dest: ca.key}
+    - {src: service-account-key.pem, dest: sa.pub}
+    - {src: service-account-key.pem, dest: sa.key}
  register: kubeadm_copy_old_certs

From 859a7f32fb361646a4ae1cfa037aad7ba957dc5a Mon Sep 17 00:00:00 2001
From: woopstar
Date: Sat, 31 Mar 2018 00:06:32 +0200
Subject: [PATCH 151/177] Fix import task. Has to be an include task to
 evaluate the etcd_cluster_setup variable at run time

---
 roles/etcd/tasks/main.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index bb299126b..a64d9b097 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -29,13 +29,13 @@
   tags:
     - upgrade

-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup

 - name: Restart etcd if certs changed
@@ -68,8 +68,8 @@
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
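For context: `import_tasks` is resolved statically while the playbook is parsed, so its `when:` is copied onto every imported task, whereas `include_tasks` is executed dynamically, evaluating the condition once with the variables and facts available at that point in the play before the file is loaded. A minimal sketch of the difference:

```
# static: resolved at parse time; the `when` is attached to each task
# inside the imported file
- import_tasks: configure.yml
  when: is_etcd_master and etcd_cluster_setup

# dynamic: the condition is evaluated at run time, so a variable computed
# earlier in the play (such as etcd_cluster_setup) is honoured
- include_tasks: configure.yml
  when: is_etcd_master and etcd_cluster_setup
```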
From 8ece922ef093ebf09d1b21a9ba99e79b054aee18 Mon Sep 17 00:00:00 2001
From: Erwan Miran
Date: Sat, 31 Mar 2018 00:30:42 +0200
Subject: [PATCH 152/177] node_labels documentation + kube-ingress label
 handling as role_node_label

---
 docs/vars.md                                            | 8 +++++++-
 roles/kubernetes/node/templates/kubelet.standard.env.j2 | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/docs/vars.md b/docs/vars.md
index f4956c882..a4ae65678 100644
--- a/docs/vars.md
+++ b/docs/vars.md
@@ -119,7 +119,13 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md)
   cgroup-driver option for Kubelet. By default autodetection is used to match Docker configuration.
 * *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
-  For example, labels can be set in the inventory as variables or more widely in group_vars
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```

 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2
index 50a5441e0..cd48fca9c 100644
--- a/roles/kubernetes/node/templates/kubelet.standard.env.j2
+++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2
@@ -88,7 +88,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {% endif %}
 {% elif inventory_hostname in groups['kube-ingress']|default([]) %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %}
+{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
 {% else %}
 {% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {% endif %}
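For illustration, a `node_labels` dict from group_vars combines with the role label appended above, so a host in the `kube-ingress` group would start kubelet with roughly the following flag (the `disk: ssd` label and the exact ordering are illustrative, not from the source):

```
# group_vars (illustrative)
node_labels:
  disk: ssd

# rendered kubelet argument, approximately:
# --node-labels=node-role.kubernetes.io/ingress=true,disk=ssd
```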
Example: diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index 50a5441e0..cd48fca9c 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -88,7 +88,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} {% elif inventory_hostname in groups['kube-ingress']|default([]) %} -{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %} +{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% else %} {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} From 26caad4f12836ebee1a1202f8bd25e3f5ed9b2b0 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 02:38:01 +0300 Subject: [PATCH 153/177] Allow ansible_ssh_private_key_file for Openstack --- contrib/terraform/openstack/ansible_bastion_template.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/terraform/openstack/ansible_bastion_template.txt b/contrib/terraform/openstack/ansible_bastion_template.txt index cdf012066..a304b2c9d 100644 --- a/contrib/terraform/openstack/ansible_bastion_template.txt +++ b/contrib/terraform/openstack/ansible_bastion_template.txt @@ -1 +1 @@ -ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"' +ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'" From 3c12c6beb35e0f115e8eccdf0732b54614314a67 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 02:59:59 +0300 Subject: [PATCH 154/177] Move cloud config configurations to proper location --- .../{preinstall => node}/templates/azure-cloud-config.j2 | 0 .../{preinstall => node}/templates/openstack-cloud-config.j2 | 0 .../{preinstall => node}/templates/vsphere-cloud-config.j2 | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/{preinstall => node}/templates/azure-cloud-config.j2 (100%) rename roles/kubernetes/{preinstall => node}/templates/openstack-cloud-config.j2 (100%) rename roles/kubernetes/{preinstall => node}/templates/vsphere-cloud-config.j2 (100%) diff --git a/roles/kubernetes/preinstall/templates/azure-cloud-config.j2 b/roles/kubernetes/node/templates/azure-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/azure-cloud-config.j2 rename to roles/kubernetes/node/templates/azure-cloud-config.j2 diff --git a/roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 b/roles/kubernetes/node/templates/openstack-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/openstack-cloud-config.j2 rename to roles/kubernetes/node/templates/openstack-cloud-config.j2 diff --git a/roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 b/roles/kubernetes/node/templates/vsphere-cloud-config.j2 similarity index 100% rename from roles/kubernetes/preinstall/templates/vsphere-cloud-config.j2 rename to roles/kubernetes/node/templates/vsphere-cloud-config.j2 From b6da596ec1e2b03ac326dc9f85926debb34de5bb Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:18:23 +0300 Subject: [PATCH 155/177] Move default configuration parameters for cloud-config --- 
roles/kubernetes/node/defaults/main.yml | 43 +++++++++++++++++++ roles/kubernetes/preinstall/defaults/main.yml | 29 ------------- 2 files changed, 43 insertions(+), 29 deletions(-) diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 2cbf56e1d..52ca8d59d 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -92,3 +92,46 @@ kube_cadvisor_port: 0 # The read-only port for the Kubelet to serve on with no authentication/authorization. kube_read_only_port: 0 + + +# For the openstack integration kubelet will need credentials to access +# openstack apis like nova and cinder. By default these values will be +# read from the environment. +openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" +openstack_username: "{{ lookup('env','OS_USERNAME') }}" +openstack_password: "{{ lookup('env','OS_PASSWORD') }}" +openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" +openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" +openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" +openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" + +# For the vsphere integration, kubelet will need credentials to access +# vsphere apis +# Documentation regarding these values can be found +# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 +vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" +vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" +vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" +vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" +vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" +vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" +vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" +vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" +vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" + +vsphere_scsi_controller_type: pvscsi +# vsphere_public_network is the name of the network the VMs are joined to +vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" + +## When azure is used, you need to also set the following variables. ## see docs/azure.md for details on how to get these values #azure_tenant_id: #azure_subscription_id: #azure_aad_client_id: #azure_aad_client_secret: #azure_resource_group: #azure_location: #azure_subnet_name: #azure_security_group_name: #azure_vnet_name: #azure_route_table_name: diff --git a/roles/kubernetes/preinstall/defaults/main.yml b/roles/kubernetes/preinstall/defaults/main.yml index 295f10178..149cbb42a 100644 --- a/roles/kubernetes/preinstall/defaults/main.yml +++ b/roles/kubernetes/preinstall/defaults/main.yml @@ -23,35 +23,6 @@ disable_ipv6_dns: false kube_cert_group: kube-cert kube_config_dir: /etc/kubernetes -# For the openstack integration kubelet will need credentials to access -# openstack apis like nova and cinder. Per default this values will be -# read from the environment.
-openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}" -openstack_username: "{{ lookup('env','OS_USERNAME') }}" -openstack_password: "{{ lookup('env','OS_PASSWORD') }}" -openstack_region: "{{ lookup('env','OS_REGION_NAME') }}" -openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}" -openstack_domain_name: "{{ lookup('env','OS_USER_DOMAIN_NAME') }}" -openstack_domain_id: "{{ lookup('env','OS_USER_DOMAIN_ID') }}" - -# For the vsphere integration, kubelet will need credentials to access -# vsphere apis -# Documentation regarding these values can be found -# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105 -vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}" -vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}" -vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}" -vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}" -vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}" -vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}" -vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}" -vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}" -vsphere_resource_pool: "{{ lookup('env', 'VSPHERE_RESOURCE_POOL') }}" - -vsphere_scsi_controller_type: pvscsi -# vsphere_public_network is name of the network the VMs are joined to -vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}" - # Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content # for hostnet pods and infra needs resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf From ab8760cc83a6bb6f9e33723e6348fe30bbb358c8 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:24:57 +0300 Subject: [PATCH 156/177] Move credentials pre-check --- roles/kubernetes/node/tasks/main.yml | 8 ++++++++ roles/kubernetes/preinstall/tasks/main.yml | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index 78e6d92d6..defd3e9f7 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -134,6 +134,14 @@ tags: - kube-proxy +- include_tasks: "{{ cloud_provider }}-credential-check.yml" + when: + - cloud_provider is defined + - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] + tags: + - cloud-provider + - facts + - name: Write cloud-config template: src: "{{ cloud_provider }}-cloud-config.j2" diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index aca0c9606..4b948831a 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -71,14 +71,6 @@ - cloud-provider - facts -- include_tasks: "{{ cloud_provider }}-credential-check.yml" - when: - - cloud_provider is defined - - cloud_provider in [ 'openstack', 'azure', 'vsphere' ] - tags: - - cloud-provider - - facts - - name: Create cni directories file: path: "{{ item }}" From 15efdf0c16724fa5389c35e27b01cb12ae1f3557 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 03:26:37 +0300 Subject: [PATCH 157/177] Move credential checks --- .../tasks => node/templates}/azure-credential-check.yml | 0 .../tasks => node/templates}/openstack-credential-check.yml | 0 .../tasks => node/templates}/vsphere-credential-check.yml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/{preinstall/tasks => 
node/templates}/azure-credential-check.yml (100%) rename roles/kubernetes/{preinstall/tasks => node/templates}/openstack-credential-check.yml (100%) rename roles/kubernetes/{preinstall/tasks => node/templates}/vsphere-credential-check.yml (100%) diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/node/templates/azure-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/azure-credential-check.yml rename to roles/kubernetes/node/templates/azure-credential-check.yml diff --git a/roles/kubernetes/preinstall/tasks/openstack-credential-check.yml b/roles/kubernetes/node/templates/openstack-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/openstack-credential-check.yml rename to roles/kubernetes/node/templates/openstack-credential-check.yml diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/node/templates/vsphere-credential-check.yml similarity index 100% rename from roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml rename to roles/kubernetes/node/templates/vsphere-credential-check.yml From 2c89a02db3af9333a930d0c5b80b221afbdc5562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Sat, 31 Mar 2018 04:40:01 +0200 Subject: [PATCH 158/177] Only download container/file if host is in defined group (#2565) * Only download container/file if host is in defined group * Set correct when clause * Fix last entries * Update download groups --- roles/download/defaults/main.yml | 70 ++++++++++++++++++++- roles/download/tasks/download_container.yml | 3 + roles/download/tasks/download_file.yml | 3 + roles/download/tasks/sync_container.yml | 9 +++ 4 files changed, 83 insertions(+), 2 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 74a3aaaf8..74594ead3 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -140,18 +140,24 @@ downloads: repo: "{{ netcheck_server_img_repo }}" tag: "{{ netcheck_server_tag }}" sha256: "{{ netcheck_server_digest_checksum|default(None) }}" + groups: + - k8s-cluster netcheck_agent: enabled: "{{ deploy_netchecker }}" container: true repo: "{{ netcheck_agent_img_repo }}" tag: "{{ netcheck_agent_tag }}" sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" + groups: + - k8s-cluster etcd: enabled: true container: true repo: "{{ etcd_image_repo }}" tag: "{{ etcd_image_tag }}" sha256: "{{ etcd_digest_checksum|default(None) }}" + groups: + - etcd kubeadm: enabled: "{{ kubeadm_enabled }}" file: true @@ -163,6 +169,8 @@ downloads: unarchive: false owner: "root" mode: "0755" + groups: + - k8s-cluster istioctl: enabled: "{{ istio_enabled }}" file: true @@ -174,140 +182,186 @@ downloads: unarchive: false owner: "root" mode: "0755" + groups: + - kube-master hyperkube: enabled: true container: true repo: "{{ hyperkube_image_repo }}" tag: "{{ hyperkube_image_tag }}" sha256: "{{ hyperkube_digest_checksum|default(None) }}" + groups: + - k8s-cluster cilium: enabled: "{{ kube_network_plugin == 'cilium' }}" container: true repo: "{{ cilium_image_repo }}" tag: "{{ cilium_image_tag }}" sha256: "{{ cilium_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel: enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" container: true repo: "{{ flannel_image_repo }}" tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" + groups: + - k8s-cluster flannel_cni: 
enabled: "{{ kube_network_plugin == 'flannel' }}" container: true repo: "{{ flannel_cni_image_repo }}" tag: "{{ flannel_cni_image_tag }}" sha256: "{{ flannel_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calicoctl: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calicoctl_image_repo }}" tag: "{{ calicoctl_image_tag }}" sha256: "{{ calicoctl_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_node: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_node_image_repo }}" tag: "{{ calico_node_image_tag }}" sha256: "{{ calico_node_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_cni: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_cni_image_repo }}" tag: "{{ calico_cni_image_tag }}" sha256: "{{ calico_cni_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_policy: enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}" container: true repo: "{{ calico_policy_image_repo }}" tag: "{{ calico_policy_image_tag }}" sha256: "{{ calico_policy_digest_checksum|default(None) }}" + groups: + - k8s-cluster calico_rr: enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}" container: true repo: "{{ calico_rr_image_repo }}" tag: "{{ calico_rr_image_tag }}" sha256: "{{ calico_rr_digest_checksum|default(None) }}" + groups: + - calico-rr weave_kube: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_kube_image_repo }}" tag: "{{ weave_kube_image_tag }}" sha256: "{{ weave_kube_digest_checksum|default(None) }}" + groups: + - k8s-cluster weave_npc: enabled: "{{ kube_network_plugin == 'weave' }}" container: true repo: "{{ weave_npc_image_repo }}" tag: "{{ weave_npc_image_tag }}" sha256: "{{ weave_npc_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_image_repo }}" tag: "{{ contiv_image_tag }}" sha256: "{{ contiv_digest_checksum|default(None) }}" + groups: + - k8s-cluster contiv_auth_proxy: enabled: "{{ kube_network_plugin == 'contiv' }}" container: true repo: "{{ contiv_auth_proxy_image_repo }}" tag: "{{ contiv_auth_proxy_image_tag }}" sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}" + groups: + - k8s-cluster pod_infra: enabled: true container: true repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" + groups: + - k8s-cluster install_socat: enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}" container: true repo: "{{ install_socat_image_repo }}" tag: "{{ install_socat_image_tag }}" sha256: "{{ install_socat_digest_checksum|default(None) }}" + groups: + - k8s-cluster nginx: - enabled: true + enabled: "{{ loadbalancer_apiserver_localhost }}" container: true repo: "{{ nginx_image_repo }}" tag: "{{ nginx_image_tag }}" sha256: "{{ nginx_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq: enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}" container: true repo: "{{ dnsmasq_image_repo }}" tag: "{{ dnsmasq_image_tag }}" sha256: "{{ dnsmasq_digest_checksum|default(None) }}" + groups: + - kube-node kubedns: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubedns_image_repo }}" tag: "{{ kubedns_image_tag }}" sha256: "{{ 
kubedns_digest_checksum|default(None) }}" + groups: + - kube-node coredns: enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" container: true repo: "{{ coredns_image_repo }}" tag: "{{ coredns_image_tag }}" sha256: "{{ coredns_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq_nanny: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_nanny_image_repo }}" tag: "{{ dnsmasq_nanny_image_tag }}" sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}" + groups: + - kube-node dnsmasq_sidecar: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ dnsmasq_sidecar_image_repo }}" tag: "{{ dnsmasq_sidecar_image_tag }}" sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}" + groups: + - kube-node kubednsautoscaler: enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}" container: true repo: "{{ kubednsautoscaler_image_repo }}" tag: "{{ kubednsautoscaler_image_tag }}" sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}" + groups: + - kube-node testbox: - enabled: true + enabled: false container: true repo: "{{ test_image_repo }}" tag: "{{ test_image_tag }}" @@ -318,30 +372,40 @@ downloads: repo: "{{ elasticsearch_image_repo }}" tag: "{{ elasticsearch_image_tag }}" sha256: "{{ elasticsearch_digest_checksum|default(None) }}" + groups: + - kube-node fluentd: enabled: "{{ efk_enabled }}" container: true repo: "{{ fluentd_image_repo }}" tag: "{{ fluentd_image_tag }}" sha256: "{{ fluentd_digest_checksum|default(None) }}" + groups: + - kube-node kibana: enabled: "{{ efk_enabled }}" container: true repo: "{{ kibana_image_repo }}" tag: "{{ kibana_image_tag }}" sha256: "{{ kibana_digest_checksum|default(None) }}" + groups: + - kube-node helm: enabled: "{{ helm_enabled }}" container: true repo: "{{ helm_image_repo }}" tag: "{{ helm_image_tag }}" sha256: "{{ helm_digest_checksum|default(None) }}" + groups: + - kube-node tiller: enabled: "{{ helm_enabled }}" container: true repo: "{{ tiller_image_repo }}" tag: "{{ tiller_image_tag }}" sha256: "{{ tiller_digest_checksum|default(None) }}" + groups: + - kube-node vault: enabled: "{{ cert_management == 'vault' }}" container: "{{ vault_deployment_type != 'host' }}" @@ -356,6 +420,8 @@ downloads: unarchive: true url: "{{ vault_download_url }}" version: "{{ vault_version }}" + groups: + - vault download_defaults: container: false diff --git a/roles/download/tasks/download_container.yml b/roles/download/tasks/download_container.yml index bbf7cec85..a5659619c 100644 --- a/roles/download/tasks/download_container.yml +++ b/roles/download/tasks/download_container.yml @@ -7,6 +7,7 @@ when: - download.enabled - download.container + - group_names | intersect(download.groups) | length tags: - facts @@ -23,6 +24,7 @@ - download.enabled - download.container - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length delegate_to: "{{ download_delegate }}" delegate_facts: yes run_once: yes @@ -38,3 +40,4 @@ - download.enabled - download.container - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length diff --git a/roles/download/tasks/download_file.yml b/roles/download/tasks/download_file.yml index 664fa4728..832fec41e 100644 --- a/roles/download/tasks/download_file.yml +++ b/roles/download/tasks/download_file.yml @@ -13,6 +13,7 @@ when: - download.enabled - download.file + - group_names | intersect(download.groups) | length - name: file_download | Download item get_url: @@ -28,6 
+29,7 @@ when: - download.enabled - download.file + - group_names | intersect(download.groups) | length - name: file_download | Extract archives unarchive: @@ -40,3 +42,4 @@ - download.enabled - download.file - download.unarchive|default(False) + - group_names | intersect(download.groups) | length diff --git a/roles/download/tasks/sync_container.yml b/roles/download/tasks/sync_container.yml index a15f78cde..1ca84ad67 100644 --- a/roles/download/tasks/sync_container.yml +++ b/roles/download/tasks/sync_container.yml @@ -7,6 +7,7 @@ when: - download.enabled - download.container + - group_names | intersect(download.groups) | length tags: - facts @@ -17,6 +18,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length tags: - facts @@ -27,6 +29,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length - name: "container_download | Update the 'container_changed' fact" set_fact: @@ -36,6 +39,7 @@ - download.container - download_run_once - pull_required|default(download_always_pull) + - group_names | intersect(download.groups) | length run_once: "{{ download_run_once }}" tags: - facts @@ -53,6 +57,7 @@ - download.enabled - download.container - download_run_once + - group_names | intersect(download.groups) | length tags: - facts @@ -68,6 +73,7 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") - (container_changed or not img.stat.exists) + - group_names | intersect(download.groups) | length - name: container_download | copy container images to ansible host synchronize: @@ -87,6 +93,7 @@ - inventory_hostname == download_delegate - download_delegate != "localhost" - saved.changed + - group_names | intersect(download.groups) | length - name: container_download | upload container images to nodes synchronize: @@ -108,6 +115,7 @@ - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") + - group_names | intersect(download.groups) | length tags: - upload - upgrade @@ -120,6 +128,7 @@ - download_run_once - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != download_delegate or download_delegate == "localhost") + - group_names | intersect(download.groups) | length tags: - upload - upgrade From aa301c31d10643750da91600b7915601cb99aad1 Mon Sep 17 00:00:00 2001 From: avoidik Date: Sat, 31 Mar 2018 13:29:00 +0300 Subject: [PATCH 159/177] Move credential checks into proper folder --- .../node/{templates => tasks}/azure-credential-check.yml | 0 .../node/{templates => tasks}/openstack-credential-check.yml | 0 .../node/{templates => tasks}/vsphere-credential-check.yml | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename roles/kubernetes/node/{templates => tasks}/azure-credential-check.yml (100%) rename roles/kubernetes/node/{templates => tasks}/openstack-credential-check.yml (100%) rename roles/kubernetes/node/{templates => tasks}/vsphere-credential-check.yml (100%) diff --git a/roles/kubernetes/node/templates/azure-credential-check.yml b/roles/kubernetes/node/tasks/azure-credential-check.yml similarity index 100% rename from roles/kubernetes/node/templates/azure-credential-check.yml rename to roles/kubernetes/node/tasks/azure-credential-check.yml diff --git a/roles/kubernetes/node/templates/openstack-credential-check.yml b/roles/kubernetes/node/tasks/openstack-credential-check.yml 
similarity index 100% rename from roles/kubernetes/node/templates/openstack-credential-check.yml rename to roles/kubernetes/node/tasks/openstack-credential-check.yml diff --git a/roles/kubernetes/node/templates/vsphere-credential-check.yml b/roles/kubernetes/node/tasks/vsphere-credential-check.yml similarity index 100% rename from roles/kubernetes/node/templates/vsphere-credential-check.yml rename to roles/kubernetes/node/tasks/vsphere-credential-check.yml From 195d6d791add01e0f9723b5ffcdfe551831f3d2b Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Wed, 28 Mar 2018 22:30:00 +0800 Subject: [PATCH 160/177] Integrate jetstack/cert-manager 0.2.3 to Kubespray --- inventory/sample/group_vars/k8s-cluster.yml | 4 ++ roles/download/defaults/main.yml | 22 +++++++- roles/etcd/defaults/main.yml | 4 +- .../ingress_controller/cert_manager/README.md | 17 +++++++ .../cert_manager/defaults/main.yml | 6 +++ .../cert_manager/tasks/main.yml | 38 ++++++++++++++ .../cert-manager-certificate-crd.yml.j2 | 21 ++++++++ .../cert-manager-clusterissuer-crd.yml.j2 | 17 +++++++ .../templates/cert-manager-clusterrole.yml.j2 | 25 +++++++++ .../cert-manager-clusterrolebinding.yml.j2 | 18 +++++++ .../templates/cert-manager-deploy.yml.j2 | 51 +++++++++++++++++++ .../templates/cert-manager-issuer-crd.yml.j2 | 17 +++++++ .../templates/cert-manager-ns.yml.j2 | 7 +++ .../templates/cert-manager-sa.yml.j2 | 11 ++++ .../ingress_controller/meta/main.yml | 7 +++ roles/kubernetes/master/defaults/main.yml | 3 +- roles/kubespray-defaults/defaults/main.yaml | 1 + roles/network_plugin/calico/defaults/main.yml | 2 +- tests/files/gce_centos7-flannel-addons.yml | 2 + 19 files changed, 268 insertions(+), 5 deletions(-) create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/README.md create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 create mode 100644 roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml index 694368954..96a301f7d 100644 --- a/inventory/sample/group_vars/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s-cluster.yml @@ -207,6 +207,10 @@ ingress_nginx_enabled: false # ingress_nginx_configmap_udp_services: # 53: "kube-system/kube-dns:53" +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" + # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now ) persistent_volumes_enabled: false diff --git a/roles/download/defaults/main.yml 
b/roles/download/defaults/main.yml index 74594ead3..21b6bc72d 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}" kibana_version: "v4.6.1" kibana_image_repo: "gcr.io/google_containers/kibana" kibana_image_tag: "{{ kibana_version }}" - helm_version: "v2.8.1" helm_image_repo: "lachlanevenson/k8s-helm" helm_image_tag: "{{ helm_version }}" @@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller" tiller_image_tag: "{{ helm_version }}" vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" +cert_manager_version: "v0.2.3" +cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller" +cert_manager_controller_image_tag: "{{ cert_manager_version }}" +cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim" +cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}" downloads: netcheck_server: @@ -422,6 +426,22 @@ downloads: version: "{{ vault_version }}" groups: - vault + cert_manager_controller: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_controller_image_repo }}" + tag: "{{ cert_manager_controller_image_tag }}" + sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" + groups: + - kube-node + cert_manager_ingress_shim: + enabled: "{{ cert_manager_enabled }}" + container: true + repo: "{{ cert_manager_ingress_shim_image_repo }}" + tag: "{{ cert_manager_ingress_shim_image_tag }}" + sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}" + groups: + - kube-node download_defaults: container: false diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 1268c13c7..6c13810c5 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts" etcd_heartbeat_interval: "250" etcd_election_timeout: "5000" -#etcd_snapshot_count: "10000" +# etcd_snapshot_count: "10000" # Parameters for ionice # -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle. # -n takes an integer between 0 (highest priority) and 7 (lowest priority) -#etcd_ionice: "-c2 -n0" +# etcd_ionice: "-c2 -n0" etcd_metrics: "basic" diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md new file mode 100644 index 000000000..b0f008676 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md @@ -0,0 +1,17 @@ +Deployment files +================ + +This directory contains example deployment manifests for cert-manager that can +be used in place of the official Helm chart. + +This is useful if you are deploying cert-manager into an environment without +Helm, or want to inspect a 'bare minimum' deployment. + +Where do these come from? +------------------------- + +The manifests in these subdirectories are generated from the Helm chart +automatically. The `values.yaml` files used to configure cert-manager can be +found in [`hack/deploy`](../../hack/deploy/). + +They are automatically generated by running `./hack/update-deploy-gen.sh`. 
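Before the manifest templates that follow, a brief illustration of what these CRDs enable: with cert-manager 0.2.x deployed, an operator could define an ACME issuer roughly like the sketch below. The ACME server URL and email are placeholder assumptions; only the apiVersion and kind come from the CRD templates in this patch.

```yaml
# Hypothetical ClusterIssuer built on the CRDs this patch installs;
# server URL and email are placeholder values.
apiVersion: certmanager.k8s.io/v1alpha1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    server: https://acme-staging.api.letsencrypt.org/directory
    email: admin@example.com
    privateKeySecretRef:
      name: letsencrypt-staging
    http01: {}
```

Certificate resources referencing such an issuer would then be reconciled by the cert-manager controller Deployment defined further below.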
diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml new file mode 100644 index 000000000..bc6bceb15 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/defaults/main.yml @@ -0,0 +1,6 @@ +--- +cert_manager_namespace: "cert-manager" +cert_manager_cpu_requests: 10m +cert_manager_cpu_limits: 30m +cert_manager_memory_requests: 32Mi +cert_manager_memory_limits: 200Mi diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml new file mode 100644 index 000000000..eeb29da2d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/tasks/main.yml @@ -0,0 +1,38 @@ +--- + +- name: Cert Manager | Create addon dir + file: + path: "{{ kube_config_dir }}/addons/cert_manager" + state: directory + owner: root + group: root + mode: 0755 + +- name: Cert Manager | Create manifests + template: + src: "{{ item.file }}.j2" + dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}" + with_items: + - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns } + - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa } + - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole } + - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding } + - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd } + - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd } + - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd } + - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy } + register: cert_manager_manifests + when: + - inventory_hostname == groups['kube-master'][0] + +- name: Cert Manager | Apply manifests + kube: + name: "{{ item.item.name }}" + namespace: "{{ cert_manager_namespace }}" + kubectl: "{{ bin_dir }}/kubectl" + resource: "{{ item.item.type }}" + filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}" + state: "latest" + with_items: "{{ cert_manager_manifests.results }}" + when: + - inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 new file mode 100644 index 000000000..48d0c5b49 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-certificate-crd.yml.j2 @@ -0,0 +1,21 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: certificates.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + scope: Namespaced + names: + kind: Certificate + plural: certificates + shortNames: + - cert + - certs + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 new file mode 100644 index 000000000..86601e098 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterissuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: 
apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: ClusterIssuer + plural: clusterissuers + scope: Cluster diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 new file mode 100644 index 000000000..9d36de5cb --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrole.yml.j2 @@ -0,0 +1,25 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +rules: + - apiGroups: ["certmanager.k8s.io"] + resources: ["certificates", "issuers", "clusterissuers"] + verbs: ["*"] + - apiGroups: [""] + # TODO: remove endpoints once 0.4 is released. We include it here in case + # users use the 'master' version of the Helm chart with a 0.2.x release of + # cert-manager that still performs leader election with Endpoint resources. + # We advise users don't do this, but some will anyway and this will reduce + # friction. + resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"] + verbs: ["*"] + - apiGroups: ["extensions"] + resources: ["ingresses"] + verbs: ["*"] diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 new file mode 100644 index 000000000..d0e481c6c --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-clusterrolebinding.yml.j2 @@ -0,0 +1,18 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: cert-manager + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager +subjects: + - name: cert-manager + namespace: {{ cert_manager_namespace }} + kind: ServiceAccount diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 new file mode 100644 index 000000000..ef66bef05 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-deploy.yml.j2 @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1beta1 +kind: Deployment +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: cert-manager + release: cert-manager + annotations: + spec: + serviceAccountName: cert-manager + containers: + - name: cert-manager + image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + args: + - --cluster-resource-namespace=$(POD_NAMESPACE) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ 
cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + + - name: ingress-shim + image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + requests: + cpu: {{ cert_manager_cpu_requests }} + memory: {{ cert_manager_memory_requests }} + limits: + cpu: {{ cert_manager_cpu_limits }} + memory: {{ cert_manager_memory_limits }} + diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 new file mode 100644 index 000000000..7e344d9f9 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-issuer-crd.yml.j2 @@ -0,0 +1,17 @@ +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: issuers.certmanager.k8s.io + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller +spec: + group: certmanager.k8s.io + version: v1alpha1 + names: + kind: Issuer + plural: issuers + scope: Namespaced diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 new file mode 100644 index 000000000..7cf3a282d --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-ns.yml.j2 @@ -0,0 +1,7 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ cert_manager_namespace }} + labels: + name: {{ cert_manager_namespace }} diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 new file mode 100644 index 000000000..ccdd5f430 --- /dev/null +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/templates/cert-manager-sa.yml.j2 @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cert-manager + namespace: {{ cert_manager_namespace }} + labels: + app: cert-manager + chart: cert-manager-0.2.5 + release: cert-manager + heritage: Tiller diff --git a/roles/kubernetes-apps/ingress_controller/meta/main.yml b/roles/kubernetes-apps/ingress_controller/meta/main.yml index da2e03ecc..617e9d9a7 100644 --- a/roles/kubernetes-apps/ingress_controller/meta/main.yml +++ b/roles/kubernetes-apps/ingress_controller/meta/main.yml @@ -6,3 +6,10 @@ dependencies: - apps - ingress-nginx - ingress-controller + + - role: kubernetes-apps/ingress_controller/cert_manager + when: cert_manager_enabled + tags: + - apps + - cert-manager + - ingress-controller diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 303c1a88a..6325bb31c 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -96,4 +96,5 @@ volume_cross_zone_attachment: false ## Encrypting Secret Data at Rest kube_encrypt_secret_data: false kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}" -kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm +# Must be either: aescbc, secretbox or aesgcm +kube_encryption_algorithm: "aescbc" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml 
index b6f46eb5a..d6217d654 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -174,6 +174,7 @@ local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') } persistent_volumes_enabled: false cephfs_provisioner_enabled: false ingress_nginx_enabled: false +cert_manager_enabled: false ## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) # openstack_blockstorage_version: "v1/v2/auto (default)" diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index 1b0cd0421..857ebd11a 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -50,4 +50,4 @@ rbac_resources: # * can-reach=DESTINATION # * interface=INTERFACE-REGEX # see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods -#calico_ip_auto_method: "interface=eth.*" +# calico_ip_auto_method: "interface=eth.*" diff --git a/tests/files/gce_centos7-flannel-addons.yml b/tests/files/gce_centos7-flannel-addons.yml index 8ac8a901b..9e2e1083f 100644 --- a/tests/files/gce_centos7-flannel-addons.yml +++ b/tests/files/gce_centos7-flannel-addons.yml @@ -16,3 +16,5 @@ deploy_netchecker: true kubedns_min_replicas: 1 cloud_provider: gce kube_encrypt_secret_data: true +ingress_nginx_enabled: true +cert_manager_enabled: true From 5fe144aa0f4c3bebd4490c20f046b6ce0dce90ed Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Sat, 31 Mar 2018 16:04:24 +0800 Subject: [PATCH 161/177] ingress-nginx: container download related things should be defined in the download role --- inventory/sample/hosts.ini | 4 ++-- roles/download/defaults/main.yml | 21 ++++++++++++++++++- .../ingress_nginx/defaults/main.yml | 6 ------ .../ingress-nginx-controller-ds.yml.j2 | 2 -- .../node/templates/kubelet.standard.env.j2 | 5 +++-- 5 files changed, 25 insertions(+), 13 deletions(-) diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini index 8eece0d48..245783334 100644 --- a/inventory/sample/hosts.ini +++ b/inventory/sample/hosts.ini @@ -26,11 +26,11 @@ # node5 # node6 -# optional for dedicated ingress node # [kube-ingress] # node2 # node3 # [k8s-cluster:children] -# kube-node # kube-master +# kube-node +# kube-ingress diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 21b6bc72d..16968cc4a 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -91,7 +91,6 @@ contiv_auth_proxy_image_repo: "contiv/auth_proxy" contiv_auth_proxy_image_tag: "{{ contiv_version }}" cilium_image_repo: "docker.io/cilium/cilium" cilium_image_tag: "{{ cilium_version }}" - nginx_image_repo: nginx nginx_image_tag: 1.13 dnsmasq_version: 2.78 @@ -131,6 +130,10 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller" tiller_image_tag: "{{ helm_version }}" vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" +ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" +ingress_nginx_controller_image_tag: "0.11.0" +ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" +ingress_nginx_default_backend_image_tag: "1.4" cert_manager_version: "v0.2.3" cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller" cert_manager_controller_image_tag: "{{ cert_manager_version }}" @@ -426,6 +429,22 @@ downloads: version: "{{ 
vault_version }}" groups: - vault + ingress_nginx_controller: + enabled: "{{ ingress_nginx_enabled }}" + container: true + repo: "{{ ingress_nginx_controller_image_repo }}" + tag: "{{ ingress_nginx_controller_image_tag }}" + sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}" + groups: + - kube-ingress + ingress_nginx_default_backend: + enabled: "{{ ingress_nginx_enabled }}" + container: true + repo: "{{ ingress_nginx_default_backend_image_repo }}" + tag: "{{ ingress_nginx_default_backend_image_tag }}" + sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}" + groups: + - kube-ingress cert_manager_controller: enabled: "{{ cert_manager_enabled }}" container: true diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml index fc114a2ba..ff1217809 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/defaults/main.yml @@ -1,10 +1,4 @@ --- -ingress_nginx_default_backend_image_repo: gcr.io/google_containers/defaultbackend -ingress_nginx_default_backend_image_tag: 1.4 - -ingress_nginx_controller_image_repo: quay.io/kubernetes-ingress-controller/nginx-ingress-controller -ingress_nginx_controller_image_tag: 0.11.0 - ingress_nginx_namespace: "ingress-nginx" ingress_nginx_host_network: false ingress_nginx_insecure_port: 80 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 index e65a440b0..52501a4c7 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 +++ b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/ingress-nginx-controller-ds.yml.j2 @@ -27,10 +27,8 @@ spec: {% if ingress_nginx_host_network %} hostNetwork: true {% endif %} -{% if 'kube-ingress' in groups and groups['kube-ingress']|length > 0 %} nodeSelector: node-role.kubernetes.io/ingress: "true" -{% endif %} terminationGracePeriodSeconds: 60 containers: - name: ingress-nginx-controller diff --git a/roles/kubernetes/node/templates/kubelet.standard.env.j2 b/roles/kubernetes/node/templates/kubelet.standard.env.j2 index cd48fca9c..5fef2476e 100644 --- a/roles/kubernetes/node/templates/kubelet.standard.env.j2 +++ b/roles/kubernetes/node/templates/kubelet.standard.env.j2 @@ -87,11 +87,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% if not standalone_kubelet|bool %} {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} -{% elif inventory_hostname in groups['kube-ingress']|default([]) %} -{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} {% else %} {% do role_node_labels.append('node-role.kubernetes.io/node=true') %} {% endif %} +{% if inventory_hostname in groups['kube-ingress']|default([]) %} +{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %} +{% endif %} {% set inventory_node_labels = [] %} {% if node_labels is defined %} {% for labelname, labelvalue in node_labels.iteritems() %} From b9b028a7350ff2bab9788f2999ceba79ddc5edb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andreas=20Kr=C3=BCger?= Date: Sat, 31 Mar 2018 20:06:09 +0200 Subject: [PATCH 162/177] Update etcd deployment to use correct cert and key (#2572) * Update etcd deployment to use correct cert and key * Update to use 
admin cert for etcdctl commands * Update handler to use admin cert too --- roles/etcd/handlers/backup.yml | 4 ++-- roles/etcd/tasks/configure.yml | 8 ++++---- roles/etcd/tasks/join_etcd-events_member.yml | 8 ++++---- roles/etcd/tasks/join_etcd_member.yml | 8 ++++---- roles/etcd/tasks/join_member.yml | 8 ++++---- roles/etcd/tasks/set_cluster_health.yml | 8 ++++---- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 247b2ae00..a0a80e108 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -48,7 +48,7 @@ snapshot save {{ etcd_backup_directory }}/snapshot.db environment: ETCDCTL_API: 3 - ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" retries: 3 delay: "{{ retry_stagger | random + 3 }}" diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index d7d3920c6..d39ba62d4 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -9,8 +9,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Check if member is in etcd-events cluster shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}" @@ -22,8 +22,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Copy etcd.service systemd file template: diff --git a/roles/etcd/tasks/join_etcd-events_member.yml b/roles/etcd/tasks/join_etcd-events_member.yml index 104ef22df..5a7061880 100644 --- a/roles/etcd/tasks/join_etcd-events_member.yml +++ b/roles/etcd/tasks/join_etcd-events_member.yml @@ -7,8 +7,8 @@ delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/join_etcd_member.yml b/roles/etcd/tasks/join_etcd_member.yml index b7801f0c9..d11037151 100644 --- a/roles/etcd/tasks/join_etcd_member.yml +++ b/roles/etcd/tasks/join_etcd_member.yml @@ -7,8 +7,8 @@ 
delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/join_member.yml b/roles/etcd/tasks/join_member.yml index b7801f0c9..d11037151 100644 --- a/roles/etcd/tasks/join_member.yml +++ b/roles/etcd/tasks/join_member.yml @@ -7,8 +7,8 @@ delay: "{{ retry_stagger | random + 3 }}" when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - include_tasks: refresh_config.yml vars: @@ -43,5 +43,5 @@ - facts when: target_node == inventory_hostname environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" diff --git a/roles/etcd/tasks/set_cluster_health.yml b/roles/etcd/tasks/set_cluster_health.yml index 68e738031..d0202943c 100644 --- a/roles/etcd/tasks/set_cluster_health.yml +++ b/roles/etcd/tasks/set_cluster_health.yml @@ -9,8 +9,8 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" - name: Configure | Check if etcd-events cluster is healthy shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'" @@ -22,5 +22,5 @@ tags: - facts environment: - ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" - ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" + ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem" + ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem" From 4c0e9ba8902d9c3cc0cdc8162908e2db1c220d23 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Sat, 31 Mar 2018 18:57:00 +0800 Subject: [PATCH 163/177] registry: container download related things should be defined in the download role --- roles/download/defaults/main.yml | 20 +++++++++++++++++++ .../registry/defaults/main.yml | 5 ----- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 16968cc4a..77c7c8ed3 100644 --- 
a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -130,6 +130,10 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller" tiller_image_tag: "{{ helm_version }}" vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" +registry_image_repo: "registry" +registry_image_tag: "2.6" +registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy" +registry_proxy_image_tag: "0.4" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" ingress_nginx_controller_image_tag: "0.11.0" ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend" @@ -429,6 +433,22 @@ downloads: version: "{{ vault_version }}" groups: - vault + registry: + enabled: "{{ registry_enabled }}" + container: true + repo: "{{ registry_image_repo }}" + tag: "{{ registry_image_tag }}" + sha256: "{{ registry_digest_checksum|default(None) }}" + groups: + - kube-node + registry_proxy: + enabled: "{{ registry_enabled }}" + container: true + repo: "{{ registry_proxy_image_repo }}" + tag: "{{ registry_proxy_image_tag }}" + sha256: "{{ registry_proxy_digest_checksum|default(None) }}" + groups: + - kube-node ingress_nginx_controller: enabled: "{{ ingress_nginx_enabled }}" container: true diff --git a/roles/kubernetes-apps/registry/defaults/main.yml b/roles/kubernetes-apps/registry/defaults/main.yml index a626435d5..aa52347bc 100644 --- a/roles/kubernetes-apps/registry/defaults/main.yml +++ b/roles/kubernetes-apps/registry/defaults/main.yml @@ -1,9 +1,4 @@ --- -registry_image_repo: registry -registry_image_tag: 2.6 -registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy -registry_proxy_image_tag: 0.4 - registry_namespace: "kube-system" registry_storage_class: "" registry_disk_size: "10Gi" From 4f714b07b8f415be1c7f9d3083f64dee0dc92675 Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Sat, 31 Mar 2018 19:20:53 +0800 Subject: [PATCH 164/177] cephfs-provisioner: container download related things should be defined in the download role --- roles/download/defaults/main.yml | 10 ++++++++++ .../cephfs_provisioner/defaults/main.yml | 3 --- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 77c7c8ed3..725fc0bbd 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -134,6 +134,8 @@ registry_image_repo: "registry" registry_image_tag: "2.6" registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy" registry_proxy_image_tag: "0.4" +cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" +cephfs_provisioner_image_tag: "92295a30" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" @@ -449,6 +451,14 @@ downloads: sha256: "{{ registry_proxy_digest_checksum|default(None) }}" groups: - kube-node + cephfs_provisioner: + enabled: "{{ cephfs_provisioner_enabled }}" + container: true + repo: "{{ cephfs_provisioner_image_repo }}" + tag: "{{ cephfs_provisioner_image_tag }}" + sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}" + groups: + - kube-node ingress_nginx_controller: enabled: "{{ ingress_nginx_enabled }}" container: true diff --git a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml 
index 3b80ecbb2..aa1bbcf83 100644 --- a/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/cephfs_provisioner/defaults/main.yml @@ -1,7 +1,4 @@ --- -cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner -cephfs_provisioner_image_tag: 92295a30 - cephfs_provisioner_namespace: "kube-system" cephfs_provisioner_cluster: ceph cephfs_provisioner_monitors: [] From b1a7889ff52412e7e7b03a7122bc84a39960c84d Mon Sep 17 00:00:00 2001 From: Wong Hoi Sing Edison Date: Sat, 31 Mar 2018 19:25:05 +0800 Subject: [PATCH 165/177] local-volume-provisioner: container download related things should be defined in the download role --- roles/download/defaults/main.yml | 10 ++++++++++ .../local_volume_provisioner/defaults/main.yml | 3 --- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 725fc0bbd..48f4743b1 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -134,6 +134,8 @@ registry_image_repo: "registry" registry_image_tag: "2.6" registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy" registry_proxy_image_tag: "0.4" +local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner" +local_volume_provisioner_image_tag: "v2.0.0" cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner" cephfs_provisioner_image_tag: "92295a30" ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller" @@ -451,6 +453,14 @@ downloads: sha256: "{{ registry_proxy_digest_checksum|default(None) }}" groups: - kube-node + local_volume_provisioner: + enabled: "{{ local_volume_provisioner_enabled }}" + container: true + repo: "{{ local_volume_provisioner_image_repo }}" + tag: "{{ local_volume_provisioner_image_tag }}" + sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" + groups: + - kube-node cephfs_provisioner: enabled: "{{ cephfs_provisioner_enabled }}" container: true diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index ea5dcb079..4b18546d3 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -1,7 +1,4 @@ --- -local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner -local_volume_provisioner_image_tag: v2.0.0 - local_volume_provisioner_namespace: "kube-system" local_volume_provisioner_base_dir: /mnt/disks local_volume_provisioner_mount_dir: /mnt/disks From 3004791c6469181a83d80971110813a3cd3ce658 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 2 Apr 2018 11:19:23 +0300 Subject: [PATCH 166/177] Add pre-upgrade task for moving credentials file (#2394) * Add pre-upgrade task for moving credentials file This reverts commit 7ef9f4dfdd7d64876aacc48a982313dbea8a06f5. 
* add python interpreter workaround for localhost
---
 .gitignore | 1 +
 .gitlab-ci.yml | 2 --
 roles/kubernetes/preinstall/tasks/main.yml | 5 ++++
 .../preinstall/tasks/pre_upgrade.yml | 28 +++++++++++++++++++
 4 files changed, 34 insertions(+), 2 deletions(-)
 create mode 100644 roles/kubernetes/preinstall/tasks/pre_upgrade.yml

diff --git a/.gitignore b/.gitignore
index fcbcd1da1..8da099d42 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,7 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/credentials/
 inventory/group_vars/fake_hosts.yml
 inventory/host_vars/
 temp
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1014440ab..5af631476 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -109,7 +109,6 @@ before_script:
       ${SSH_ARGS}
       ${LOG_LEVEL}
       -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
@@ -129,7 +128,6 @@ before_script:
       ${SSH_ARGS}
       ${LOG_LEVEL}
      -e @${CI_TEST_VARS}
-      -e ansible_python_interpreter=${PYPATH}
       -e ansible_ssh_user=${SSH_USER}
       -e local_release_dir=${PWD}/downloads
       --limit "all:!fake_hosts"
diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml
index aca0c9606..db7bfa00f 100644
--- a/roles/kubernetes/preinstall/tasks/main.yml
+++ b/roles/kubernetes/preinstall/tasks/main.yml
@@ -3,6 +3,11 @@
   tags:
     - asserts

+# This is run before bin_dir is pinned because these tasks are run on localhost
+- import_tasks: pre_upgrade.yml
+  tags:
+    - upgrade
+
 - name: Force binaries directory for Container Linux by CoreOS
   set_fact:
     bin_dir: "/opt/bin"
diff --git a/roles/kubernetes/preinstall/tasks/pre_upgrade.yml b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
new file mode 100644
index 000000000..63cbc9be1
--- /dev/null
+++ b/roles/kubernetes/preinstall/tasks/pre_upgrade.yml
@@ -0,0 +1,28 @@
+---
+- name: "Pre-upgrade | check if old credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/../credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: old_credential_dir
+  become: no
+
+- name: "Pre-upgrade | check if new credential dir exists"
+  local_action:
+    module: stat
+    path: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  register: new_credential_dir
+  become: no
+  when: old_credential_dir.stat.exists
+
+- name: "Pre-upgrade | move data from old credential dir to new"
+  local_action: command mv {{ inventory_dir }}/../credentials {{ inventory_dir }}/credentials
+  args:
+    creates: "{{ inventory_dir }}/credentials"
+  vars:
+    ansible_python_interpreter: "/usr/bin/env python"
+  become: no
+  when: old_credential_dir.stat.exists and not new_credential_dir.stat.exists

From 4b98537f795353f61dacac51b956cc8c028c261a Mon Sep 17 00:00:00 2001
From: vterdunov
Date: Mon, 2 Apr 2018 18:45:42 +0300
Subject: [PATCH 167/177] Properly check vsphere_cloud_provider.rc

---
 roles/kubernetes-apps/cluster_roles/tasks/main.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/roles/kubernetes-apps/cluster_roles/tasks/main.yml b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
index fefa7caeb..0511b7be5 100644
--- a/roles/kubernetes-apps/cluster_roles/tasks/main.yml
+++ b/roles/kubernetes-apps/cluster_roles/tasks/main.yml
@@ -104,6 +104,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
    - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')
@@ -121,6 +122,7 @@
     - rbac_enabled
     - cloud_provider is defined
     - cloud_provider == 'vsphere'
+    - vsphere_cloud_provider.rc is defined
     - vsphere_cloud_provider.rc != 0
     - kube_version | version_compare('v1.9.0', '>=')
     - kube_version | version_compare('v1.9.3', '<=')

From 76bb5f8d756e335f1b06e1b385f9868c2581f817 Mon Sep 17 00:00:00 2001
From: georgejdli
Date: Mon, 2 Apr 2018 10:57:24 -0500
Subject: [PATCH 168/177] check if dedicated service account token signing key exists

---
 roles/kubernetes/secrets/tasks/check-certs.yml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml
index 4780b14d6..07820edf7 100644
--- a/roles/kubernetes/secrets/tasks/check-certs.yml
+++ b/roles/kubernetes/secrets/tasks/check-certs.yml
@@ -50,6 +50,7 @@
            '{{ kube_cert_dir }}/kube-controller-manager-key.pem',
            '{{ kube_cert_dir }}/front-proxy-client.pem',
            '{{ kube_cert_dir }}/front-proxy-client-key.pem',
+           '{{ kube_cert_dir }}/service-account-key.pem',
            {% for host in groups['kube-master'] %}
            '{{ kube_cert_dir }}/admin-{{ host }}.pem'
            '{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
@@ -71,7 +72,8 @@
       {% for cert in ['apiserver.pem', 'apiserver-key.pem',
                       'kube-scheduler.pem','kube-scheduler-key.pem',
                       'kube-controller-manager.pem','kube-controller-manager-key.pem',
-                      'front-proxy-client.pem','front-proxy-client-key.pem'] -%}
+                      'front-proxy-client.pem','front-proxy-client-key.pem',
+                      'service-account-key.pem'] -%}
       {% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
       {% if not cert_file in existing_certs -%}
       {%- set gen = True -%}

From 32f4194cf8d19a0a70395db4118ef16274ae9dff Mon Sep 17 00:00:00 2001
From: Xiaoxi He
Date: Tue, 3 Apr 2018 10:39:17 +0800
Subject: [PATCH 169/177] Bump ingress-nginx-controller to version 0.12.0

---
 roles/download/defaults/main.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 48f4743b1..24be1685d 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -139,7 +139,7 @@ local_volume_provisioner_image_tag: "v2.0.0"
 cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
 cephfs_provisioner_image_tag: "92295a30"
 ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
-ingress_nginx_controller_image_tag: "0.11.0"
+ingress_nginx_controller_image_tag: "0.12.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
 cert_manager_version: "v0.2.3"

From 428a554ddb4230abc267c17a8b7c1769345f0eb0 Mon Sep 17 00:00:00 2001
From: Wong Hoi Sing Edison
Date: Tue, 3 Apr 2018 14:29:50 +0800
Subject: [PATCH 170/177] istio: container download related things should be defined in the download role

---
 roles/download/defaults/main.yml | 80 +++++++++++++++++++
 roles/kubernetes-apps/istio/defaults/main.yml | 30 -------
 2 files changed, 80 insertions(+), 30 deletions(-)

diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 48f4743b1..bc36a0fdc 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -70,6 +70,22 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
+istio_proxy_image_repo: docker.io/istio/proxy
+istio_proxy_image_tag: "{{ istio_version }}"
+istio_proxy_init_image_repo: docker.io/istio/proxy_init
+istio_proxy_init_image_tag: "{{ istio_version }}"
+istio_ca_image_repo: docker.io/istio/istio-ca
+istio_ca_image_tag: "{{ istio_version }}"
+istio_mixer_image_repo: docker.io/istio/mixer
+istio_mixer_image_tag: "{{ istio_version }}"
+istio_pilot_image_repo: docker.io/istio/pilot
+istio_pilot_image_tag: "{{ istio_version }}"
+istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
+istio_proxy_debug_image_tag: "{{ istio_version }}"
+istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
+istio_sidecar_initializer_image_tag: "{{ istio_version }}"
+istio_statsd_image_repo: prom/statsd-exporter
+istio_statsd_image_tag: latest
 hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
@@ -199,6 +215,70 @@ downloads:
     mode: "0755"
     groups:
       - kube-master
+  istio_proxy:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_image_repo }}"
+    tag: "{{ istio_proxy_image_tag }}"
+    sha256: "{{ istio_proxy_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_init:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_init_image_repo }}"
+    tag: "{{ istio_proxy_init_image_tag }}"
+    sha256: "{{ istio_proxy_init_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_ca:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_ca_image_repo }}"
+    tag: "{{ istio_ca_image_tag }}"
+    sha256: "{{ istio_ca_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_mixer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_mixer_image_repo }}"
+    tag: "{{ istio_mixer_image_tag }}"
+    sha256: "{{ istio_mixer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_pilot:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_pilot_image_repo }}"
+    tag: "{{ istio_pilot_image_tag }}"
+    sha256: "{{ istio_pilot_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_proxy_debug:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_proxy_debug_image_repo }}"
+    tag: "{{ istio_proxy_debug_image_tag }}"
+    sha256: "{{ istio_proxy_debug_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_sidecar_initializer:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_sidecar_initializer_image_repo }}"
+    tag: "{{ istio_sidecar_initializer_image_tag }}"
+    sha256: "{{ istio_sidecar_initializer_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  istio_statsd:
+    enabled: "{{ istio_enabled }}"
+    container: true
+    repo: "{{ istio_statsd_image_repo }}"
+    tag: "{{ istio_statsd_image_tag }}"
+    sha256: "{{ istio_statsd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   hyperkube:
     enabled: true
     container: true
diff --git a/roles/kubernetes-apps/istio/defaults/main.yml b/roles/kubernetes-apps/istio/defaults/main.yml
index dc51ea7d6..6124ce42e 100644
--- a/roles/kubernetes-apps/istio/defaults/main.yml
+++ b/roles/kubernetes-apps/istio/defaults/main.yml
@@ -1,32 +1,2 @@
 ---
-istio_enabled: false
-
 istio_namespace: istio-system
-istio_version: "0.2.6"
-
-istioctl_download_url: "https://storage.googleapis.com/istio-release/releases/{{ istio_version }}/istioctl/istioctl-linux"
-istioctl_checksum: fd703063c540b8c0ab943f478c05ab257d88ae27224c746a27d0526ddbf7c370
-
-istio_proxy_image_repo: docker.io/istio/proxy
-istio_proxy_image_tag: "{{ istio_version }}"
-
-istio_proxy_init_image_repo: docker.io/istio/proxy_init
-istio_proxy_init_image_tag: "{{ istio_version }}"
-
-istio_ca_image_repo: docker.io/istio/istio-ca
-istio_ca_image_tag: "{{ istio_version }}"
-
-istio_mixer_image_repo: docker.io/istio/mixer
-istio_mixer_image_tag: "{{ istio_version }}"
-
-istio_pilot_image_repo: docker.io/istio/pilot
-istio_pilot_image_tag: "{{ istio_version }}"
-
-istio_proxy_debug_image_repo: docker.io/istio/proxy_debug
-istio_proxy_debug_image_tag: "{{ istio_version }}"
-
-istio_sidecar_initializer_image_repo: docker.io/istio/sidecar_initializer
-istio_sidecar_initializer_image_tag: "{{ istio_version }}"
-
-istio_statsd_image_repo: prom/statsd-exporter
-istio_statsd_image_tag: latest

From b54e0918865d9d10db4aa2fcaf6b0fb4ad5a623d Mon Sep 17 00:00:00 2001
From: Chen Hong
Date: Wed, 4 Apr 2018 18:18:51 +0800
Subject: [PATCH 171/177] Persist ip_vs modules

---
 roles/kubernetes/node/tasks/main.yml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index defd3e9f7..dd2885a97 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -118,6 +118,14 @@
   tags:
     - kube-proxy

+- name: Persist ip_vs modules
+  copy:
+    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
+    content: "ip_vs\nip_vs_rr\nip_vs_wrr\nip_vs_sh\nnf_conntrack_ipv4"
+  when: kube_proxy_mode == 'ipvs'
+  tags:
+    - kube-proxy
+
 - name: Write proxy manifest
   template:
     src: manifests/kube-proxy.manifest.j2

From 973e7372b452fd9cc9141dccff6f2f9ffc1ecac5 Mon Sep 17 00:00:00 2001
From: Chen Hong
Date: Wed, 4 Apr 2018 23:05:27 +0800
Subject: [PATCH 172/177] content: |

---
 roles/kubernetes/node/tasks/main.yml | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index dd2885a97..13cc0740d 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -121,7 +121,12 @@
 - name: Persist ip_vs modules
   copy:
     dest: /etc/modules-load.d/kube_proxy-ipvs.conf
-    content: "ip_vs\nip_vs_rr\nip_vs_wrr\nip_vs_sh\nnf_conntrack_ipv4"
+    content: |
+      ip_vs
+      ip_vs_rr
+      ip_vs_wrr
+      ip_vs_sh
+      nf_conntrack_ipv4
   when: kube_proxy_mode == 'ipvs'
   tags:
     - kube-proxy

From ca6a07f5954d77afdbdf6c2ff2b0db01a1e4c210 Mon Sep 17 00:00:00 2001
From: RongZhang
Date: Thu, 5 Apr 2018 22:36:50 +0800
Subject: [PATCH 173/177] Add VMware vSphere to deployment platforms

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 56210a8f9..994469b22 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,7 @@ Deploy a Production Ready Kubernetes Cluster

 If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.

-- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
 - **High available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Support most popular **Linux distributions**

From ca40d51bc618118c1ae16a75078649c063eee8fc Mon Sep 17 00:00:00 2001
From: Daniel Hoherd
Date: Thu, 5 Apr 2018 15:54:58 -0700
Subject: [PATCH 174/177] Fix typos (no logic changes)

---
 inventory/sample/group_vars/k8s-cluster.yml | 6 +++---
 roles/kubernetes/preinstall/tasks/dhclient-hooks.yml | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/inventory/sample/group_vars/k8s-cluster.yml b/inventory/sample/group_vars/k8s-cluster.yml
index 031108767..345d22a36 100644
--- a/inventory/sample/group_vars/k8s-cluster.yml
+++ b/inventory/sample/group_vars/k8s-cluster.yml
@@ -1,8 +1,8 @@
 # Kubernetes configuration dirs and system namespace.
 # Those are where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
+# the kubernetes normally puts in /srv/kubernetes.
 # This puts them in a sane location and namespace.
-# Editting those values will almost surely break something.
+# Editing those values will almost surely break something.
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
@@ -28,7 +28,7 @@ local_release_dir: "/tmp/releases"
 retry_stagger: 5

 # This is the group that the cert creation scripts chgrp the
-# cert files to. Not really changable...
+# cert files to. Not really changeable...
 kube_cert_group: kube-cert

 # Cluster Loglevel configuration
diff --git a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
index 8c0a5f599..0ab2c9b07 100644
--- a/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
+++ b/roles/kubernetes/preinstall/tasks/dhclient-hooks.yml
@@ -15,7 +15,7 @@
   notify: Preinstall | restart network
   when: dhclientconffile is defined

-- name: Configue dhclient hooks for resolv.conf (non-RH)
+- name: Configure dhclient hooks for resolv.conf (non-RH)
   template:
     src: dhclient_dnsupdate.sh.j2
     dest: "{{ dhclienthookfile }}"
@@ -24,7 +24,7 @@
   notify: Preinstall | restart network
   when: ansible_os_family != "RedHat"

-- name: Configue dhclient hooks for resolv.conf (RH-only)
+- name: Configure dhclient hooks for resolv.conf (RH-only)
   template:
     src: dhclient_dnsupdate_rh.sh.j2
     dest: "{{ dhclienthookfile }}"

From 9086665013a10051e7f79233937b3ba19dcc4878 Mon Sep 17 00:00:00 2001
From: rongzhang
Date: Fri, 6 Apr 2018 17:28:33 +0800
Subject: [PATCH 175/177] Fix issue #2522

Support Debian stretch

https://download.docker.com/linux/debian/dists/
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 994469b22..3bd0ebfb9 100644
--- a/README.md
+++ b/README.md
@@ -66,7 +66,7 @@ Supported Linux Distributions
 -----------------------------

 - **Container Linux by CoreOS**
-- **Debian** Jessie
+- **Debian** Jessie, Stretch, Wheezy
 - **Ubuntu** 16.04
 - **CentOS/RHEL** 7
 - **Fedora/CentOS** Atomic

From 66b61866cdc7fbe7bde19bf260a361414953f19d Mon Sep 17 00:00:00 2001
From: rongzhang
Date: Fri, 6 Apr 2018 18:16:05 +0800
Subject: [PATCH 176/177] Fix docker check error for Atomic

Fix issue #2611
---
 roles/docker/tasks/pre-upgrade.yml | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/roles/docker/tasks/pre-upgrade.yml b/roles/docker/tasks/pre-upgrade.yml
index 9315da305..8b75cba0d 100644
--- a/roles/docker/tasks/pre-upgrade.yml
+++ b/roles/docker/tasks/pre-upgrade.yml
@@ -6,7 +6,9 @@
   with_items:
     - docker
     - docker-engine
-  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+  when:
+    - ansible_os_family == 'Debian'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))

 - name: Ensure old versions of Docker are not installed. | RedHat
   package:
@@ -17,4 +19,7 @@
     - docker-common
     - docker-engine
     - docker-selinux
-  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
\ No newline at end of file
+  when:
+    - ansible_os_family == 'RedHat'
+    - (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+    - not is_atomic

From b68854f79d5b777ea7f8a0ff9625c4e0b28faa65 Mon Sep 17 00:00:00 2001
From: Atoms
Date: Mon, 9 Apr 2018 13:19:26 +0300
Subject: [PATCH 177/177] fix kubectl download location and remove kubectl.sh helper owner/group

---
 roles/kubernetes/client/tasks/main.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/roles/kubernetes/client/tasks/main.yml b/roles/kubernetes/client/tasks/main.yml
index cf70b4995..d34131a3a 100644
--- a/roles/kubernetes/client/tasks/main.yml
+++ b/roles/kubernetes/client/tasks/main.yml
@@ -55,7 +55,7 @@
 - name: Copy kubectl binary to ansible host
   fetch:
     src: "{{ bin_dir }}/kubectl"
-    dest: "{{ bin_dir }}/kubectl"
+    dest: "{{ artifacts_dir }}/kubectl"
     flat: yes
     validate_checksum: no
   become: no
@@ -68,8 +68,6 @@
       #!/bin/bash
       kubectl --kubeconfig=admin.conf $@
     dest: "{{ artifacts_dir }}/kubectl.sh"
-    owner: root
-    group: root
     mode: 0755
   become: no
   run_once: yes
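
Taken together, patches 163-165 and 170 converge on a single pattern: every container image an optional component needs is declared once in roles/download/defaults/main.yml and registered under the downloads dict, so the download role can pre-pull (and optionally checksum-verify) the image on the relevant hosts before the component's role runs. A minimal sketch of what a new component would add, assuming a hypothetical myapp image and a myapp_enabled flag defined elsewhere (neither is part of this series):

    # roles/download/defaults/main.yml -- hypothetical addition, for illustration only
    myapp_image_repo: "quay.io/example/myapp"   # hypothetical image location
    myapp_image_tag: "v1.0.0"                   # hypothetical pinned version

    downloads:
      myapp:
        enabled: "{{ myapp_enabled }}"          # assumed to be defined in the component's defaults
        container: true
        repo: "{{ myapp_image_repo }}"
        tag: "{{ myapp_image_tag }}"
        sha256: "{{ myapp_digest_checksum|default(None) }}"  # optional digest pin, empty by default
        groups:
          - kube-node                           # hosts that must have the image before deploy

With the image handled centrally, the component's own defaults/main.yml shrinks to pure configuration, which is exactly what the deletions in the registry, cephfs, local-volume-provisioner, and istio defaults files above show.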