From b9b654714eb91b4c2530512688c8d8d6371dd310 Mon Sep 17 00:00:00 2001 From: Mohamed Zaian Date: Tue, 25 Oct 2022 03:28:35 +0200 Subject: [PATCH 01/36] [nerdctl] upgrade to version 1.0.0 (#9424) --- roles/download/defaults/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 871c42f64..978a9fca9 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -124,7 +124,7 @@ kube_ovn_dpdk_version: "19.11-{{ kube_ovn_version }}" kube_router_version: "v1.5.1" multus_version: "v3.8-{{ image_arch }}" helm_version: "v3.9.4" -nerdctl_version: "0.22.2" +nerdctl_version: "1.0.0" krew_version: "v0.4.3" skopeo_version: v1.10.0 @@ -759,13 +759,13 @@ gvisor_containerd_shim_binary_checksums: nerdctl_archive_checksums: arm: - 0.22.2: 3db76ae74a6fac7aa740550cdb4fad338c0297ae585aa850b638042346f260f5 + 1.0.0: 8fd283a2f2272b15f3df43cd79642c25f19f62c3c56ad58bb68afb7ed92904c2 arm64: - 0.22.2: 15fc3f992b59d6fbadca9c71e0337dab77cdfb08d79c925502449180a13d94a4 + 1.0.0: 27622c9d95efe6d807d5f3770d24ddd71719c6ae18f76b5fc89663a51bcd6208 amd64: - 0.22.2: ad40ecf11c689fad594a05a40fef65adb4df8ecd1ffb6711e13cff5382aeaed9 + 1.0.0: 3e993d714e6b88d1803a58d9ff5a00d121f0544c35efed3a3789e19d6ab36964 ppc64le: - 0.22.2: c2c8d2785f0c4fb169f2f5b07547785ca83a5c249560b3c19c84f1c2adb0ff87 + 1.0.0: 2fb02e629a4be16b194bbfc64819132a72ede1f52596bd8e1ec2beaf7c28c117 containerd_archive_checksums: arm: From 2af918132ec0b3dfdb5fe3a0e2142b0c2f9d60dd Mon Sep 17 00:00:00 2001 From: Mohamed Zaian Date: Tue, 25 Oct 2022 03:32:36 +0200 Subject: [PATCH 02/36] Update kubernetes dashboard to 2.7.0 (k8s 1.25 support) (#9425) --- roles/download/defaults/main.yml | 2 +- .../ansible/templates/dashboard.yml.j2 | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 978a9fca9..34558f474 100644 --- 
a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -1044,7 +1044,7 @@ gcp_pd_csi_resizer_image_tag: "v0.4.0-gke.0" gcp_pd_csi_registrar_image_tag: "v1.2.0-gke.0" dashboard_image_repo: "{{ docker_image_repo }}/kubernetesui/dashboard-{{ image_arch }}" -dashboard_image_tag: "v2.6.1" +dashboard_image_tag: "v2.7.0" dashboard_metrics_scraper_repo: "{{ docker_image_repo }}/kubernetesui/metrics-scraper" dashboard_metrics_scraper_tag: "v1.0.8" diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 index d75b2cd08..b0c341926 100644 --- a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -163,6 +163,9 @@ spec: labels: k8s-app: kubernetes-dashboard spec: + securityContext: + seccompProfile: + type: RuntimeDefault priorityClassName: system-cluster-critical containers: - name: kubernetes-dashboard @@ -208,6 +211,11 @@ spec: port: 8443 initialDelaySeconds: 30 timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 volumes: - name: kubernetes-dashboard-certs secret: @@ -293,6 +301,9 @@ spec: labels: k8s-app: kubernetes-metrics-scraper spec: + securityContext: + seccompProfile: + type: RuntimeDefault priorityClassName: system-cluster-critical containers: - name: kubernetes-metrics-scraper @@ -307,6 +318,11 @@ spec: port: 8000 initialDelaySeconds: 30 timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 volumeMounts: - mountPath: /tmp name: tmp-volume From ef707b34616b3cb2c16f5d8bafc8537a9e7cee24 Mon Sep 17 00:00:00 2001 From: Kay Yan Date: Wed, 26 Oct 2022 07:34:37 +0800 Subject: [PATCH 03/36] update-containerd-1.6.9 (#9427) --- README.md | 2 +- roles/download/defaults/main.yml | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff 
--git a/README.md b/README.md index 2a21db86c..b8c15d76d 100644 --- a/README.md +++ b/README.md @@ -139,7 +139,7 @@ Note: Upstart/SysV init based OS types are not supported. - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.3 - [etcd](https://github.com/etcd-io/etcd) v3.5.5 - [docker](https://www.docker.com/) v20.10 (see note) - - [containerd](https://containerd.io/) v1.6.8 + - [containerd](https://containerd.io/) v1.6.9 - [cri-o](http://cri-o.io/) v1.24 (experimental: see [CRI-O Note](docs/cri-o.md). Only on fedora, ubuntu and centos based OS) - Network Plugin - [cni-plugins](https://github.com/containernetworking/plugins) v1.1.1 diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 34558f474..d67ccb6df 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -79,7 +79,7 @@ runc_version: v1.1.4 kata_containers_version: 2.4.1 youki_version: 0.0.1 gvisor_version: 20210921 -containerd_version: 1.6.8 +containerd_version: 1.6.9 cri_dockerd_version: 0.2.2 # this is relevant when container_manager == 'docker' @@ -786,6 +786,7 @@ containerd_archive_checksums: 1.6.6: 0 1.6.7: 0 1.6.8: 0 + 1.6.9: 0 arm64: 1.5.5: 0 1.5.7: 0 @@ -804,6 +805,7 @@ containerd_archive_checksums: 1.6.6: 807bf333df331d713708ead66919189d7b142a0cc21ec32debbc988f9069d5eb 1.6.7: 4167bf688a0ed08b76b3ac264b90aad7d9dd1424ad9c3911e9416b45e37b0be5 1.6.8: b114e36ecce78cef9d611416c01b784a420928c82766d6df7dc02b10d9da94cd + 1.6.9: 140197aee930a8bd8a69ff8e0161e56305751be66e899dccd833c27d139f4f47 amd64: 1.5.5: 8efc527ffb772a82021800f0151374a3113ed2439922497ff08f2596a70f10f1 1.5.7: 109fc95b86382065ea668005c376360ddcd8c4ec413e7abe220ae9f461e0e173 @@ -822,6 +824,7 @@ containerd_archive_checksums: 1.6.6: 0212869675742081d70600a1afc6cea4388435cc52bf5dc21f4efdcb9a92d2ef 1.6.7: 52e817b712d521b193773529ff33626f47507973040c02474a2db95a37da1c37 1.6.8: 3a1322c18ee5ff4b9bd5af6b7b30c923a3eab8af1df05554f530ef8e2b24ac5e + 1.6.9: 
9ee2644bfb95b23123f96b564df2035ec94a46f64060ae12322e09a8ec3c2b53 ppc64le: 1.5.5: 0 1.5.7: 0 @@ -840,6 +843,7 @@ containerd_archive_checksums: 1.6.6: 0 1.6.7: 0db5cb6d5dd4f3b7369c6945d2ec29a9c10b106643948e3224e53885f56863a9 1.6.8: f18769721f614828f6b778030c72dc6969ce2108f2363ddc85f6c7a147df0fb8 + 1.6.9: fe0046437cfe971ef0b3101ee69fcef5cf52e8868de708d35f8b82f998044f6e skopeo_binary_checksums: arm: From eeb376460d112b15bd041ba038ede0d74416d1ff Mon Sep 17 00:00:00 2001 From: William Turner Date: Wed, 26 Oct 2022 03:28:37 -0400 Subject: [PATCH 04/36] Fix inconsistent handling of admission plugin list (#9407) * Fix inconsistent handling of admission plugin list * Adjust hardening doc with the normalized admission plugin list * Add pre-check for admission plugins format change * Ignore checking admission plugins value when variable is not defined --- docs/hardening.md | 13 ++++++++++++- .../control-plane/tasks/kubeadm-setup.yml | 2 +- .../templates/admission-controls.yaml.j2 | 2 +- .../preinstall/tasks/0020-verify-settings.yml | 8 ++++++++ .../files/packet_ubuntu20-calico-aio-hardening.yml | 13 ++++++++++++- 5 files changed, 34 insertions(+), 4 deletions(-) diff --git a/docs/hardening.md b/docs/hardening.md index 9a7f3d841..b3359b74b 100644 --- a/docs/hardening.md +++ b/docs/hardening.md @@ -41,7 +41,18 @@ kube_encrypt_secret_data: true kube_encryption_resources: [secrets] kube_encryption_algorithm: "secretbox" -kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity'] +kube_apiserver_enable_admission_plugins: + - EventRateLimit + - AlwaysPullImages + - ServiceAccount + - NamespaceLifecycle + - NodeRestriction + - LimitRanger + - ResourceQuota + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - PodNodeSelector + - PodSecurity kube_apiserver_admission_control_config_file: true # 
EventRateLimit plugin configuration kube_apiserver_admission_event_rate_limits: diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 5f8c78445..d9f7304ef 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -106,7 +106,7 @@ when: - kube_apiserver_admission_control_config_file - item in kube_apiserver_admission_plugins_needs_configuration - loop: "{{ kube_apiserver_enable_admission_plugins[0].split(',') }}" + loop: "{{ kube_apiserver_enable_admission_plugins }}" - name: kubeadm | Check if apiserver.crt contains all needed SANs shell: | diff --git a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 index 0bb4517c2..34f5f188c 100644 --- a/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/admission-controls.yaml.j2 @@ -1,7 +1,7 @@ apiVersion: apiserver.config.k8s.io/v1 kind: AdmissionConfiguration plugins: -{% for plugin in kube_apiserver_enable_admission_plugins[0].split(',') %} +{% for plugin in kube_apiserver_enable_admission_plugins %} {% if plugin in kube_apiserver_admission_plugins_needs_configuration %} - name: {{ plugin }} path: {{ kube_config_dir }}/{{ plugin|lower }}.yaml diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index b7f9b2570..242d6def9 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -305,3 +305,11 @@ when: - kube_external_ca_mode - not ignore_assert_errors + +- name: Stop if using deprecated comma separated list for admission plugins + assert: + that: "',' not in kube_apiserver_enable_admission_plugins[0]" + msg: "Comma-separated list for 
kube_apiserver_enable_admission_plugins is now deprecated, use separate list items for each plugin." + when: + - kube_apiserver_enable_admission_plugins is defined + - kube_apiserver_enable_admission_plugins | length > 0 diff --git a/tests/files/packet_ubuntu20-calico-aio-hardening.yml b/tests/files/packet_ubuntu20-calico-aio-hardening.yml index c013f7954..76340d873 100644 --- a/tests/files/packet_ubuntu20-calico-aio-hardening.yml +++ b/tests/files/packet_ubuntu20-calico-aio-hardening.yml @@ -36,7 +36,18 @@ kube_encrypt_secret_data: true kube_encryption_resources: [secrets] kube_encryption_algorithm: "secretbox" -kube_apiserver_enable_admission_plugins: ['EventRateLimit,AlwaysPullImages,ServiceAccount,NamespaceLifecycle,NodeRestriction,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,PodNodeSelector,PodSecurity'] +kube_apiserver_enable_admission_plugins: + - EventRateLimit + - AlwaysPullImages + - ServiceAccount + - NamespaceLifecycle + - NodeRestriction + - LimitRanger + - ResourceQuota + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - PodNodeSelector + - PodSecurity kube_apiserver_admission_control_config_file: true # EventRateLimit plugin configuration kube_apiserver_admission_event_rate_limits: From 990f87acc811cdf0bdf38d906e499ec9b12c00a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=92=8B=E8=88=AA?= Date: Thu, 27 Oct 2022 10:28:32 +0800 Subject: [PATCH 05/36] Update kube-vip to v0.5.5 (#9437) Signed-off-by: hang.jiang Signed-off-by: hang.jiang --- README.md | 2 +- roles/download/defaults/main.yml | 2 +- roles/kubernetes/node/defaults/main.yml | 2 +- roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index b8c15d76d..7dc6f390b 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,7 @@ Note: Upstart/SysV init based OS types are not supported. 
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.5.1 - [multus](https://github.com/intel/multus-cni) v3.8 - [weave](https://github.com/weaveworks/weave) v2.8.1 - - [kube-vip](https://github.com/kube-vip/kube-vip) v0.4.2 + - [kube-vip](https://github.com/kube-vip/kube-vip) v0.5.5 - Application - [cert-manager](https://github.com/jetstack/cert-manager) v1.9.1 - [coredns](https://github.com/coredns/coredns) v1.8.6 diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index d67ccb6df..a24910a6b 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -942,7 +942,7 @@ multus_image_repo: "{{ github_image_repo }}/k8snetworkplumbingwg/multus-cni" multus_image_tag: "{{ multus_version }}" kube_vip_image_repo: "{{ github_image_repo }}/kube-vip/kube-vip" -kube_vip_image_tag: v0.4.2 +kube_vip_image_tag: v0.5.5 nginx_image_repo: "{{ docker_image_repo }}/library/nginx" nginx_image_tag: 1.23.0-alpine haproxy_image_repo: "{{ docker_image_repo }}/library/haproxy" diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 69a89ab4b..8be61744f 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -62,7 +62,7 @@ eviction_hard_control_plane: {} kubelet_status_update_frequency: 10s # kube-vip -kube_vip_version: v0.4.2 +kube_vip_version: v0.5.5 kube_vip_arp_enabled: false kube_vip_interface: diff --git a/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 index 2ca073f52..02887cfa9 100644 --- a/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-vip.manifest.j2 @@ -1,4 +1,4 @@ -# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.4.2/pkg/kubevip/config_generator.go#L13 +# Inspired by https://github.com/kube-vip/kube-vip/blob/v0.5.5/pkg/kubevip/config_generator.go#L13 apiVersion: v1 
kind: Pod metadata: From 5e14398af4dd74f2e563dc328e94df6400ef89bb Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Thu, 27 Oct 2022 04:52:33 +0200 Subject: [PATCH 06/36] Upgrade ruamel.yaml.clib to work with Python 3.11 (#9426) ruamel.yaml.clib did not build with the upcoming Python 3.11. Cf. https://sourceforge.net/p/ruamel-yaml-clib/tickets/9/ ruamel.yaml.clib==0.2.7 fixes the issue. --- requirements-2.11.txt | 2 +- requirements-2.12.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-2.11.txt b/requirements-2.11.txt index ab59f38d3..ef2e02315 100644 --- a/requirements-2.11.txt +++ b/requirements-2.11.txt @@ -6,5 +6,5 @@ netaddr==0.7.19 pbr==5.4.4 jmespath==0.9.5 ruamel.yaml==0.16.10 -ruamel.yaml.clib==0.2.6 +ruamel.yaml.clib==0.2.7 MarkupSafe==1.1.1 diff --git a/requirements-2.12.txt b/requirements-2.12.txt index f8b1e5000..722cc99e3 100644 --- a/requirements-2.12.txt +++ b/requirements-2.12.txt @@ -6,5 +6,5 @@ netaddr==0.7.19 pbr==5.4.4 jmespath==0.9.5 ruamel.yaml==0.16.10 -ruamel.yaml.clib==0.2.6 +ruamel.yaml.clib==0.2.7 MarkupSafe==1.1.1 From 4d3f6376840b341e981e616d55b36863aa71b0dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=9D=A8=E5=88=9A?= Date: Fri, 28 Oct 2022 12:46:30 +0800 Subject: [PATCH 07/36] Remove PodSecurityPolicies in Metallb for kubernetes 1.25 (#9442) --- .../metallb/templates/metallb.yml.j2 | 78 ------------------- 1 file changed, 78 deletions(-) diff --git a/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 b/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 index 7408625ef..fc03cd286 100644 --- a/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 +++ b/roles/kubernetes-apps/metallb/templates/metallb.yml.j2 @@ -5,84 +5,6 @@ metadata: labels: app: metallb --- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - labels: - app: metallb - name: controller -spec: - allowPrivilegeEscalation: false - allowedCapabilities: [] - allowedHostPaths: [] - defaultAddCapabilities: [] - 
defaultAllowPrivilegeEscalation: false - fsGroup: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - hostIPC: false - hostNetwork: false - hostPID: false - privileged: false - readOnlyRootFilesystem: true - requiredDropCapabilities: - - ALL - runAsUser: - rule: MustRunAsNonRoot - seLinux: - rule: RunAsAny - supplementalGroups: - ranges: - - max: 65535 - min: 1 - rule: MustRunAs - volumes: - - configMap - - secret - - emptyDir ---- -{% if metallb_speaker_enabled %} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - labels: - app: metallb - name: speaker -spec: - allowPrivilegeEscalation: false - allowedCapabilities: - - NET_RAW - allowedHostPaths: [] - defaultAddCapabilities: [] - defaultAllowPrivilegeEscalation: false - fsGroup: - rule: RunAsAny - hostIPC: false - hostNetwork: true - hostPID: false - hostPorts: - - max: {{ metallb_port }} - min: {{ metallb_port }} - - max: {{ metallb_memberlist_port }} - min: {{ metallb_memberlist_port }} - privileged: true - readOnlyRootFilesystem: true - requiredDropCapabilities: - - ALL - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - configMap - - secret - - emptyDir -{% endif %} ---- apiVersion: v1 kind: ServiceAccount metadata: From c6814354325947c9ee99e326d15afcd14aad8eb9 Mon Sep 17 00:00:00 2001 From: biqiang Wu <62228454+dcwbq@users.noreply.github.com> Date: Fri, 28 Oct 2022 18:08:31 +0800 Subject: [PATCH 08/36] Add switch cilium_enable_bandwidth_manager (#9441) Signed-off-by: dcwbq Signed-off-by: dcwbq --- docs/cilium.md | 17 +++++++++++++++++ roles/network_plugin/cilium/defaults/main.yml | 7 +++++++ .../cilium/templates/cilium/config.yml.j2 | 9 +++++++++ 3 files changed, 33 insertions(+) diff --git a/docs/cilium.md b/docs/cilium.md index e907d53cd..033ea6a29 100644 --- a/docs/cilium.md +++ b/docs/cilium.md @@ -121,6 +121,23 @@ cilium_encryption_type: "wireguard" Kubespray currently supports Linux distributions with Wireguard Kernel mode on 
Linux 5.6 and newer. +## Bandwidth Manager + +Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + +Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. +In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + +Bandwidth Manager requires a v5.1.x or more recent Linux kernel. + +For further information, make sure to check the official [Cilium documentation.](https://docs.cilium.io/en/v1.12/gettingstarted/bandwidth-manager/) + +To use this function, set the following parameters + +```yml +cilium_enable_bandwidth_manager: true +``` + ## Install Cilium Hubble k8s-net-cilium.yml: diff --git a/roles/network_plugin/cilium/defaults/main.yml b/roles/network_plugin/cilium/defaults/main.yml index 0e624e53c..b58b39e15 100644 --- a/roles/network_plugin/cilium/defaults/main.yml +++ b/roles/network_plugin/cilium/defaults/main.yml @@ -103,6 +103,13 @@ cilium_ipsec_node_encryption: false # This option is only effective when `cilium_encryption_type` is set to `wireguard`. cilium_wireguard_userspace_fallback: false +# Enable Bandwidth Manager +# Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. +# Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. +# In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. +# Bandwidth Manager requires a v5.1.x or more recent Linux kernel. 
+cilium_enable_bandwidth_manager: false + # IP Masquerade Agent # https://docs.cilium.io/en/stable/concepts/networking/masquerading/ # By default, all packets from a pod destined to an IP address outside of the cilium_native_routing_cidr range are masqueraded diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 index 6e647760d..313821ab1 100644 --- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -117,6 +117,15 @@ data: # - geneve tunnel: "{{ cilium_tunnel_mode }}" + # Enable Bandwidth Manager + # Cilium’s bandwidth manager supports the kubernetes.io/egress-bandwidth Pod annotation. + # Bandwidth enforcement currently does not work in combination with L7 Cilium Network Policies. + # In case they select the Pod at egress, then the bandwidth enforcement will be disabled for those Pods. + # Bandwidth Manager requires a v5.1.x or more recent Linux kernel. +{% if cilium_enable_bandwidth_manager %} + enable-bandwidth-manager: "true" +{% endif %} + # Name of the cluster. Only relevant when building a mesh of clusters. 
cluster-name: "{{ cilium_cluster_name }}" From 78624c5bcbf52f228f4443ba5d60e2af868a6276 Mon Sep 17 00:00:00 2001 From: biqiang Wu <62228454+dcwbq@users.noreply.github.com> Date: Mon, 31 Oct 2022 08:02:45 +0800 Subject: [PATCH 09/36] When using cilium CNI, install Cilium CLI (#9436) Signed-off-by: dcwbq Signed-off-by: dcwbq --- roles/download/defaults/main.yml | 30 +++++++++++++++++++ roles/network_plugin/cilium/tasks/install.yml | 7 +++++ 2 files changed, 37 insertions(+) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index a24910a6b..60b7217b7 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -117,6 +117,7 @@ weave_version: 2.8.1 pod_infra_version: "3.7" cilium_version: "v1.12.1" +cilium_cli_version: "v0.12.5" cilium_enable_hubble: false kube_ovn_version: "v1.9.7" @@ -158,6 +159,7 @@ cni_download_url: "https://github.com/containernetworking/plugins/releases/downl calicoctl_download_url: "https://github.com/projectcalico/calico/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" calicoctl_alternate_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}" calico_crds_download_url: "https://github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" +ciliumcli_download_url: "https://github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" crictl_download_url: "https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-{{ ansible_system | lower }}-{{ image_arch }}.tar.gz" crio_download_url: "https://storage.googleapis.com/cri-o/artifacts/cri-o.{{ image_arch }}.{{ crio_version }}.tar.gz" helm_download_url: "https://get.helm.sh/helm-{{ helm_version }}-linux-{{ image_arch }}.tar.gz" @@ -592,6 +594,20 @@ calicoctl_binary_checksums: v3.22.4: 
f8672ac27ab72c1b05b0f9ae5694881ef8e061bfbcf551f964e7f0a37090a243 v3.21.6: f7aad0409de2838ba691708943a2aeeef6fb9c02a0475293106e179dc48a4632 +ciliumcli_binary_checksums: + arm: + v0.12.4: 8e0596d321c97a55449942c2ebd8bb0102dc6a9381919287e383b679cee8f524 + v0.12.5: 1c9a8cf8df62eb814d6c90f6ad6a1c074f991fde5b5573059d27729f12619496 + amd64: + v0.12.4: 6b4f899fa09b6558a89a32ace3be4dedca08b7f4b76f04931ed1ffb2de8965e2 + v0.12.5: 6b2c9031e4264482b18873ad337394442b8787d6ac26e16e865d36f320c650f0 + arm64: + v0.12.4: e037f34fded56e4199e9e7ff1ce623d2516be7116a6490e02377f786acec5bda + v0.12.5: b779d4b04b23fcae30cc158ce9d29e2cad0c98bd88582c0a2c8d457c71d5c4b3 + ppc64le: + v0.12.4: 0 + v0.12.5: 0 + calico_crds_archive_checksums: v3.23.3: d25f5c9a3adeba63219f3c8425a8475ebfbca485376a78193ec1e4c74e7a6115 v3.22.4: e72e7b8b26256950c1ce0042ac85fa83700154dae9723c8d007de88343f6a7e5 @@ -862,6 +878,7 @@ kubectl_binary_checksum: "{{ kubectl_checksums[image_arch][kube_version] }}" kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}" calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}" calico_crds_archive_checksum: "{{ calico_crds_archive_checksums[calico_version] }}" +ciliumcli_binary_checksum: "{{ ciliumcli_binary_checksums[image_arch][cilium_cli_version] }}" crictl_binary_checksum: "{{ crictl_checksums[image_arch][crictl_version] }}" crio_archive_checksum: "{{ crio_archive_checksums[image_arch][crio_version] }}" cri_dockerd_archive_checksum: "{{ cri_dockerd_archive_checksums[image_arch][cri_dockerd_version] }}" @@ -1375,6 +1392,19 @@ downloads: groups: - k8s_cluster + ciliumcli: + enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" + file: true + version: "{{ cilium_cli_version }}" + dest: "{{ local_release_dir }}/cilium" + sha256: "{{ ciliumcli_binary_checksum }}" + url: "{{ ciliumcli_download_url }}" + unarchive: true + owner: "root" + mode: "0755" + groups: + - k8s_cluster + 
multus: enabled: "{{ kube_network_plugin_multus }}" container: true diff --git a/roles/network_plugin/cilium/tasks/install.yml b/roles/network_plugin/cilium/tasks/install.yml index a948f7ec7..9e89b7bc7 100644 --- a/roles/network_plugin/cilium/tasks/install.yml +++ b/roles/network_plugin/cilium/tasks/install.yml @@ -88,3 +88,10 @@ dest: /etc/cni/net.d/000-cilium-portmap.conflist mode: 0644 when: cilium_enable_portmap + +- name: Cilium | Copy Ciliumcli binary from download dir + copy: + src: "{{ local_release_dir }}/cilium" + dest: "{{ bin_dir }}/cilium" + mode: 0755 + remote_src: yes From c2724219102d79b0ee87167c593f8db7ef78df23 Mon Sep 17 00:00:00 2001 From: lijin-union Date: Mon, 31 Oct 2022 08:16:43 +0800 Subject: [PATCH 10/36] Add UOS linux support (#9432) --- README.md | 1 + docs/_sidebar.md | 1 + docs/uoslinux.md | 9 ++++ roles/bootstrap-os/tasks/main.yml | 10 ++++- .../containerd/tasks/main.yml | 2 +- roles/container-engine/docker/tasks/main.yml | 1 + .../docker/vars/uniontech.yml | 45 +++++++++++++++++++ .../preinstall/tasks/0020-verify-settings.yml | 2 +- 8 files changed, 68 insertions(+), 3 deletions(-) create mode 100644 docs/uoslinux.md create mode 100644 roles/container-engine/docker/vars/uniontech.yml diff --git a/README.md b/README.md index 7dc6f390b..467075fa1 100644 --- a/README.md +++ b/README.md @@ -130,6 +130,7 @@ vagrant up - **Rocky Linux** [8, 9](docs/centos.md#centos-8) - **Kylin Linux Advanced Server V10** (experimental: see [kylin linux notes](docs/kylinlinux.md)) - **Amazon Linux 2** (experimental: see [amazon linux notes](docs/amazonlinux.md)) +- **UOS Linux** (experimental: see [uos linux notes](docs/uoslinux.md)) Note: Upstart/SysV init based OS types are not supported. 
diff --git a/docs/_sidebar.md b/docs/_sidebar.md index 44d2d2de5..b269bea02 100644 --- a/docs/_sidebar.md +++ b/docs/_sidebar.md @@ -37,6 +37,7 @@ * [CentOS/OracleLinux/AlmaLinux/Rocky Linux](docs/centos.md) * [Kylin Linux Advanced Server V10](docs/kylinlinux.md) * [Amazon Linux 2](docs/amazonlinux.md) + * [UOS Linux](docs/uoslinux.md) * CRI * [Containerd](docs/containerd.md) * [Docker](docs/docker.md) diff --git a/docs/uoslinux.md b/docs/uoslinux.md new file mode 100644 index 000000000..1078389bd --- /dev/null +++ b/docs/uoslinux.md @@ -0,0 +1,9 @@ +# UOS Linux + +UOS Linux(UnionTech OS Server 20) is supported with docker and containerd runtimes. + +**Note:** that UOS Linux is not currently covered in kubespray CI and +support for it is currently considered experimental. + +There are no special considerations for using UOS Linux as the target OS +for Kubespray deployments. diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 5b98d59d3..9d3d05d28 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -7,7 +7,7 @@ check_mode: false - include_tasks: bootstrap-centos.yml - when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines' + when: '''ID="centos"'' in os_release.stdout_lines or ''ID="ol"'' in os_release.stdout_lines or ''ID="almalinux"'' in os_release.stdout_lines or ''ID="rocky"'' in os_release.stdout_lines or ''ID="kylin"'' in os_release.stdout_lines or ''ID="uos"'' in os_release.stdout_lines' - include_tasks: bootstrap-amazon.yml when: '''ID="amzn"'' in os_release.stdout_lines' @@ -92,6 +92,14 @@ tags: - facts +- name: Set os_family fact for UOS Linux + set_fact: + ansible_os_family: "RedHat" + ansible_distribution_major_version: "8" + when: ansible_distribution == "UnionTech" + tags: + - facts + - name: Install 
ceph-commmon package package: name: diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 5415059f3..6bb536413 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -3,7 +3,7 @@ fail: msg: "{{ ansible_distribution }} is not supported by containerd." when: - - ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server"] + - ansible_distribution not in ["CentOS", "OracleLinux", "RedHat", "Ubuntu", "Debian", "Fedora", "AlmaLinux", "Rocky", "Amazon", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse", "openSUSE Leap", "openSUSE Tumbleweed", "Kylin Linux Advanced Server", "UnionTech"] - name: containerd | Remove any package manager controlled containerd package package: diff --git a/roles/container-engine/docker/tasks/main.yml b/roles/container-engine/docker/tasks/main.yml index ee23b9d34..24d39b8ff 100644 --- a/roles/container-engine/docker/tasks/main.yml +++ b/roles/container-engine/docker/tasks/main.yml @@ -101,6 +101,7 @@ update_cache: true dnf: enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" + disablerepo: "{{ docker_package_info.disablerepo | default(omit) }}" yum: enablerepo: "{{ docker_package_info.enablerepo | default(omit) }}" zypper: diff --git a/roles/container-engine/docker/vars/uniontech.yml b/roles/container-engine/docker/vars/uniontech.yml new file mode 100644 index 000000000..79b8abc94 --- /dev/null +++ b/roles/container-engine/docker/vars/uniontech.yml @@ -0,0 +1,45 @@ +--- +# containerd versions are only relevant for docker +containerd_versioned_pkg: + 'latest': "{{ containerd_package }}" + '1.3.7': "{{ containerd_package }}-1.3.7-3.1.el{{ ansible_distribution_major_version }}" + '1.3.9': "{{ 
containerd_package }}-1.3.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.3': "{{ containerd_package }}-1.4.3-3.2.el{{ ansible_distribution_major_version }}" + '1.4.4': "{{ containerd_package }}-1.4.4-3.1.el{{ ansible_distribution_major_version }}" + '1.4.6': "{{ containerd_package }}-1.4.6-3.1.el{{ ansible_distribution_major_version }}" + '1.4.9': "{{ containerd_package }}-1.4.9-3.1.el{{ ansible_distribution_major_version }}" + '1.4.12': "{{ containerd_package }}-1.4.12-3.1.el{{ ansible_distribution_major_version }}" + '1.6.4': "{{ containerd_package }}-1.6.4-3.1.el{{ ansible_distribution_major_version }}" + '1.6.8': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'stable': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + 'edge': "{{ containerd_package }}-1.6.8-3.1.el{{ ansible_distribution_major_version }}" + +docker_version: 19.03 +docker_cli_version: 19.03 + +# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package +# https://download.docker.com/linux/centos/>/x86_64/stable/Packages/ +# or do 'yum --showduplicates list docker-engine' +docker_versioned_pkg: + 'latest': docker-ce + '18.09': docker-ce-3:18.09.9-3.el7 + '19.03': docker-ce-3:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-3:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_cli_versioned_pkg: + 'latest': docker-ce-cli + '18.09': docker-ce-cli-1:18.09.9-3.el7 + '19.03': docker-ce-cli-1:19.03.15-3.el{{ ansible_distribution_major_version }} + '20.10': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'stable': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + 'edge': docker-ce-cli-1:20.10.17-3.el{{ ansible_distribution_major_version }} + +docker_package_info: + 
enablerepo: "docker-ce" + disablerepo: "UniontechOS-20-AppStream" + pkgs: + - "{{ containerd_versioned_pkg[docker_containerd_version | string] }}" + - "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}" + - "{{ docker_versioned_pkg[docker_version | string] }}" diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index 242d6def9..b450a4ef9 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -24,7 +24,7 @@ - name: Stop if unknown OS assert: - that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'Suse', 'openSUSE Leap', 'openSUSE Tumbleweed', 'ClearLinux', 'OracleLinux', 'AlmaLinux', 'Rocky', 'Amazon', 'Kylin Linux Advanced Server'] + that: ansible_distribution in ['RedHat', 'CentOS', 'Fedora', 'Ubuntu', 'Debian', 'Flatcar', 'Flatcar Container Linux by Kinvolk', 'Suse', 'openSUSE Leap', 'openSUSE Tumbleweed', 'ClearLinux', 'OracleLinux', 'AlmaLinux', 'Rocky', 'Amazon', 'Kylin Linux Advanced Server', 'UnionTech'] msg: "{{ ansible_distribution }} is not a known OS" when: not ignore_assert_errors From d00508105bd1a95a15807386f514b1ecd301df19 Mon Sep 17 00:00:00 2001 From: yanggang Date: Mon, 31 Oct 2022 11:08:44 +0800 Subject: [PATCH 11/36] Removed PodSecurityPolicy from ingress-nginx (#9448) --- .../ingress_nginx/tasks/main.yml | 7 --- .../templates/psp-ingress-nginx.yml.j2 | 47 ------------------- 2 files changed, 54 deletions(-) delete mode 100644 roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2 diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml index d99b6c265..cc0ed71c3 100644 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml +++ 
b/roles/kubernetes-apps/ingress_controller/ingress_nginx/tasks/main.yml @@ -23,8 +23,6 @@ - { name: role-ingress-nginx, file: role-ingress-nginx.yml, type: role } - { name: rolebinding-ingress-nginx, file: rolebinding-ingress-nginx.yml, type: rolebinding } - { name: ds-ingress-nginx-controller, file: ds-ingress-nginx-controller.yml, type: ds } - ingress_nginx_templates_for_psp: - - { name: psp-ingress-nginx, file: psp-ingress-nginx.yml, type: podsecuritypolicy } ingress_nginx_templates_for_webhook: - { name: admission-webhook-configuration, file: admission-webhook-configuration.yml, type: sa } - { name: sa-admission-webhook, file: sa-admission-webhook.yml, type: sa } @@ -34,11 +32,6 @@ - { name: rolebinding-admission-webhook, file: rolebinding-admission-webhook.yml, type: rolebinding } - { name: admission-webhook-job, file: admission-webhook-job.yml, type: job } -- name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Templates list for PodSecurityPolicy - set_fact: - ingress_nginx_templates: "{{ ingress_nginx_templates_for_psp + ingress_nginx_templates }}" - when: podsecuritypolicy_enabled - - name: NGINX Ingress Controller | Append extra templates to NGINX Ingress Templates list for webhook set_fact: ingress_nginx_templates: "{{ ingress_nginx_templates + ingress_nginx_templates_for_webhook }}" diff --git a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2 b/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2 deleted file mode 100644 index 903f26808..000000000 --- a/roles/kubernetes-apps/ingress_controller/ingress_nginx/templates/psp-ingress-nginx.yml.j2 +++ /dev/null @@ -1,47 +0,0 @@ ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: ingress-nginx - annotations: - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' -{% if apparmor_enabled %} - 
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' -{% endif %} - labels: - addonmanager.kubernetes.io/mode: Reconcile -spec: - privileged: false - allowPrivilegeEscalation: true - allowedCapabilities: - - NET_BIND_SERVICE - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: {{ ingress_nginx_host_network|bool }} - hostPorts: - - min: 0 - max: 65535 - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false From 1f54cef71ceb02c274bbd7b322e7340733f62663 Mon Sep 17 00:00:00 2001 From: William Turner Date: Mon, 31 Oct 2022 16:16:45 -0400 Subject: [PATCH 12/36] Add variable to set direct routing on flannel VXLAN (#9438) --- inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml | 1 + roles/network_plugin/flannel/defaults/main.yml | 1 + roles/network_plugin/flannel/templates/cni-flannel.yml.j2 | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml index a42c5b3be..1a38ba71f 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml @@ -16,3 +16,4 @@ # flannel_backend_type: "vxlan" # flannel_vxlan_vni: 1 # flannel_vxlan_port: 8472 +# flannel_vxlan_direct_routing: false diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index b07513517..cd1dcf16d 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -19,6 +19,7 @@ flannel_backend_type: "vxlan" 
flannel_vxlan_vni: 1 flannel_vxlan_port: 8472 +flannel_vxlan_direct_routing: false # Limits for apps flannel_memory_limit: 500M diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index 59cecb257..fb6f5bf88 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -34,7 +34,8 @@ data: "Backend": { "Type": "{{ flannel_backend_type }}"{% if flannel_backend_type == "vxlan" %}, "VNI": {{ flannel_vxlan_vni }}, - "Port": {{ flannel_vxlan_port }} + "Port": {{ flannel_vxlan_port }}, + "DirectRouting": {{ flannel_vxlan_direct_routing | to_json }} {% endif %} } } From c78862052cf47f2267727cb5f6ce50b22feb3138 Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Tue, 1 Nov 2022 05:08:45 +0100 Subject: [PATCH 13/36] Stop using python 'test' internal package (#9454) `test` is is a internal Python package (see [doc]), and as such should not be used here. It make tests fail in some environments. [doc]: https://docs.python.org/3/library/test.html --- contrib/inventory_builder/tests/test_inventory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py index 12416f2d3..5d6649d68 100644 --- a/contrib/inventory_builder/tests/test_inventory.py +++ b/contrib/inventory_builder/tests/test_inventory.py @@ -13,7 +13,7 @@ # under the License. 
import inventory -from test import support +from io import StringIO import unittest from unittest import mock @@ -41,7 +41,7 @@ class TestInventoryPrintHostnames(unittest.TestCase): 'access_ip': '10.90.0.3'}}}}) with mock.patch('builtins.open', mock_io): with self.assertRaises(SystemExit) as cm: - with support.captured_stdout() as stdout: + with mock.patch('sys.stdout', new_callable=StringIO) as stdout: inventory.KubesprayInventory( changed_hosts=["print_hostnames"], config_file="file") From 6bff338bad2073b43b21d0f0545cf64aeee270f9 Mon Sep 17 00:00:00 2001 From: charlychiu Date: Wed, 2 Nov 2022 15:14:46 +0800 Subject: [PATCH 14/36] fix: hubble relay tls error (#9457) --- roles/network_plugin/cilium/templates/cilium/config.yml.j2 | 2 +- roles/network_plugin/cilium/templates/hubble/config.yml.j2 | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 index 313821ab1..7a524c6ba 100644 --- a/roles/network_plugin/cilium/templates/cilium/config.yml.j2 +++ b/roles/network_plugin/cilium/templates/cilium/config.yml.j2 @@ -175,7 +175,7 @@ data: {% endif %} hubble-listen-address: ":4244" {% if cilium_enable_hubble and cilium_hubble_install %} - hubble-disable-tls: "false" + hubble-disable-tls: "{% if cilium_hubble_tls_generate %}false{% else %}true{% endif %}" hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt diff --git a/roles/network_plugin/cilium/templates/hubble/config.yml.j2 b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 index d9723db03..4f42abe85 100644 --- a/roles/network_plugin/cilium/templates/hubble/config.yml.j2 +++ b/roles/network_plugin/cilium/templates/hubble/config.yml.j2 @@ -16,7 +16,8 @@ data: tls-client-cert-file: /var/lib/hubble-relay/tls/client.crt tls-client-key-file: 
/var/lib/hubble-relay/tls/client.key tls-hubble-server-ca-files: /var/lib/hubble-relay/tls/hubble-server-ca.crt - disable-server-tls: true + disable-server-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} + disable-client-tls: {% if cilium_hubble_tls_generate %}false{% else %}true{% endif %} --- # Source: cilium/templates/hubble-ui-configmap.yaml apiVersion: v1 From 5cf2883444f9ffebec3622ae28b1f1bda838d201 Mon Sep 17 00:00:00 2001 From: cleverhu Date: Wed, 2 Nov 2022 15:18:45 +0800 Subject: [PATCH 15/36] add retry for start calico kube controller (#9450) Signed-off-by: cleverhu Signed-off-by: cleverhu --- roles/kubernetes-apps/policy_controller/calico/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 59a31e40c..e4169b237 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -33,6 +33,9 @@ state: "latest" with_items: - "{{ calico_kube_manifests.results }}" + register: calico_kube_controller_start + until: calico_kube_controller_start is succeeded + retries: 4 when: - inventory_hostname == groups['kube_control_plane'][0] - not item is skipped From ce751cb89da34b36c15b861a42245519ea7b7444 Mon Sep 17 00:00:00 2001 From: yanggang Date: Wed, 2 Nov 2022 15:22:46 +0800 Subject: [PATCH 16/36] add variable condition snapshot in vSphere CSI (#9429) --- roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml | 4 ++++ .../vsphere/templates/vsphere-csi-controller-config.yml.j2 | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml index 84e78c3f1..0a4d02d96 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml +++ 
b/roles/kubernetes-apps/csi_driver/vsphere/defaults/main.yml @@ -31,3 +31,7 @@ vsphere_csi_node_affinity: {} # may contain some private data, so it is recommended to set it to false # in the production environment. unsafe_show_logs: false + +# https://github.com/kubernetes-sigs/vsphere-csi-driver/blob/master/docs/book/features/volume_snapshot.md#how-to-enable-volume-snapshot--restore-feature-in-vsphere-csi- +# according to the above link , we can controler the block-volume-snapshot parameter +vsphere_csi_block_volume_snapshot: false \ No newline at end of file diff --git a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 index 3e16ae1b0..d7ee521eb 100644 --- a/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 +++ b/roles/kubernetes-apps/csi_driver/vsphere/templates/vsphere-csi-controller-config.yml.j2 @@ -11,7 +11,7 @@ data: "async-query-volume": "true" "improved-csi-idempotency": "true" "improved-volume-topology": "true" - "block-volume-snapshot": "false" + "block-volume-snapshot": "{{ vsphere_csi_block_volume_snapshot }}" "csi-windows-support": "false" {% if vsphere_csi_controller is version('v2.5.0', '>=') %} "use-csinode-id": "true" From 34a52a702882fc5224ce5a2fae8f47bea2adf50f Mon Sep 17 00:00:00 2001 From: cleverhu Date: Wed, 2 Nov 2022 15:30:47 +0800 Subject: [PATCH 17/36] update cilium cli offline download url example (#9458) Signed-off-by: cleverhu Signed-off-by: cleverhu --- inventory/sample/group_vars/all/offline.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/inventory/sample/group_vars/all/offline.yml b/inventory/sample/group_vars/all/offline.yml index 9ea31ffca..01c45d69c 100644 --- a/inventory/sample/group_vars/all/offline.yml +++ b/inventory/sample/group_vars/all/offline.yml @@ -37,6 +37,9 @@ # [Optional] Calico with kdd: If using Calico network plugin with kdd datastore # 
calico_crds_download_url: "{{ files_repo }}/github.com/projectcalico/calico/archive/{{ calico_version }}.tar.gz" +# [Optional] Cilium: If using Cilium network plugin +# ciliumcli_download_url: "{{ files_repo }}/github.com/cilium/cilium-cli/releases/download/{{ cilium_cli_version }}/cilium-linux-{{ image_arch }}.tar.gz" + # [Optional] Flannel: If using Falnnel network plugin # flannel_cni_download_url: "{{ files_repo }}/kubernetes/flannel/{{ flannel_cni_version }}/flannel-{{ image_arch }}" From 58faef6ff6b8002a590cf5cb8b898f08b740920f Mon Sep 17 00:00:00 2001 From: Fred Rolland Date: Thu, 3 Nov 2022 11:41:36 +0200 Subject: [PATCH 18/36] Flannel: fix init container image arch (#9461) The install-cni-plugin image was not updated to the corresponding arch when building the different DS. Fixes issue #9460 Signed-off-by: Fred Rolland Signed-off-by: Fred Rolland --- roles/network_plugin/flannel/templates/cni-flannel.yml.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 index fb6f5bf88..819061119 100644 --- a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -114,7 +114,7 @@ spec: - {{ arch }} initContainers: - name: install-cni-plugin - image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag }} + image: {{ flannel_init_image_repo }}:{{ flannel_init_image_tag | regex_replace(image_arch,'') }}{{ arch }} command: - cp args: From 4aa1ef28eaaeb69de9a8d495f817f1594231a374 Mon Sep 17 00:00:00 2001 From: Jiffs Maverick Date: Thu, 3 Nov 2022 11:45:36 +0200 Subject: [PATCH 19/36] Don't use coredns_server in dhclient.conf if nodelocaldns is enabled (#9392) --- roles/kubernetes/preinstall/tasks/0040-set_facts.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml 
b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index 58e0685a2..e6148ace1 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -195,7 +195,7 @@ nameserverentries: |- {{ (([nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([]) + (configured_nameservers|d([]) if not disable_host_nameservers|d()|bool else [])) | unique | join(',') }} supersede_nameserver: - supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + coredns_server|d([]) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; + supersede domain-name-servers {{ ( ( [nodelocaldns_ip] if enable_nodelocaldns else []) + (coredns_server|d([]) if not enable_nodelocaldns else []) + nameservers|d([]) + cloud_resolver|d([])) | unique | join(', ') }}; when: not dns_early or dns_late # This task should run instead of the above task when cluster/nodelocal DNS hasn't From d7f08d1b0ce6e5a12c4a7b5b4d2c63cd525bd24e Mon Sep 17 00:00:00 2001 From: lijin-union Date: Thu, 3 Nov 2022 19:43:38 +0800 Subject: [PATCH 20/36] remove the set_fact action which raise error in the CI (#9462) --- roles/bootstrap-os/tasks/main.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 9d3d05d28..7ce82e686 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -92,14 +92,6 @@ tags: - facts -- name: Set os_family fact for UOS Linux - set_fact: - ansible_os_family: "RedHat" - ansible_distribution_major_version: "8" - when: ansible_distribution == "UnionTech" - tags: - - facts - - name: Install ceph-commmon package package: name: From 2a696ddb345036b129b457f294222d7689a603c2 Mon Sep 17 00:00:00 2001 From: ausias-armesto Date: Sun, 6 Nov 2022 11:38:15 +0100 Subject: [PATCH 21/36] Adding metrics server to use 
host network (#9444) * Adding metrics server to use host network * EXternalize value to a variable --- inventory/sample/group_vars/k8s_cluster/addons.yml | 1 + roles/kubernetes-apps/metrics_server/defaults/main.yml | 1 + .../metrics_server/templates/metrics-server-deployment.yaml.j2 | 1 + 3 files changed, 3 insertions(+) diff --git a/inventory/sample/group_vars/k8s_cluster/addons.yml b/inventory/sample/group_vars/k8s_cluster/addons.yml index 1560a01da..0d58f6379 100644 --- a/inventory/sample/group_vars/k8s_cluster/addons.yml +++ b/inventory/sample/group_vars/k8s_cluster/addons.yml @@ -18,6 +18,7 @@ metrics_server_enabled: false # metrics_server_kubelet_insecure_tls: true # metrics_server_metric_resolution: 15s # metrics_server_kubelet_preferred_address_types: "InternalIP,ExternalIP,Hostname" +# metrics_server_host_network: false # Rancher Local Path Provisioner local_path_provisioner_enabled: false diff --git a/roles/kubernetes-apps/metrics_server/defaults/main.yml b/roles/kubernetes-apps/metrics_server/defaults/main.yml index 2dfad2737..a682b25cb 100644 --- a/roles/kubernetes-apps/metrics_server/defaults/main.yml +++ b/roles/kubernetes-apps/metrics_server/defaults/main.yml @@ -7,3 +7,4 @@ metrics_server_limits_cpu: 100m metrics_server_limits_memory: 200Mi metrics_server_requests_cpu: 100m metrics_server_requests_memory: 200Mi +metrics_server_host_network: false diff --git a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 index ce107ec17..d30b443c1 100644 --- a/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 +++ b/roles/kubernetes-apps/metrics_server/templates/metrics-server-deployment.yaml.j2 @@ -27,6 +27,7 @@ spec: spec: priorityClassName: system-cluster-critical serviceAccountName: metrics-server + hostNetwork: {{ metrics_server_host_network | default(false) }} containers: - name: metrics-server image: {{ 
metrics_server_image_repo }}:{{ metrics_server_image_tag }} From 590b4aa240c7615f665a2cb5817957ac1e7d127c Mon Sep 17 00:00:00 2001 From: Cyclinder Date: Mon, 7 Nov 2022 09:34:17 +0800 Subject: [PATCH 22/36] adjust calico-kube-controller to non-hostnetwork pod (#9465) Signed-off-by: cyclinder qifeng.guo@daocloud.io Signed-off-by: cyclinder qifeng.guo@daocloud.io --- .../calico/templates/calico-kube-controllers.yml.j2 | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 index 35e4959bb..bd15082f0 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-kube-controllers.yml.j2 @@ -21,9 +21,10 @@ spec: spec: nodeSelector: {{ calico_policy_controller_deployment_nodeselector }} - hostNetwork: true serviceAccountName: calico-kube-controllers tolerations: + - key: CriticalAddonsOnly + operator: Exists - key: node-role.kubernetes.io/master effect: NoSchedule - key: node-role.kubernetes.io/control-plane From 40261fdf1477c82c58b6ab1b7c4809eca17813bb Mon Sep 17 00:00:00 2001 From: ERIK Date: Mon, 7 Nov 2022 09:54:16 +0800 Subject: [PATCH 23/36] Fix iputils install failure in Kylin OS (#9453) Signed-off-by: bo.jiang Signed-off-by: bo.jiang --- roles/bootstrap-os/tasks/main.yml | 8 -------- .../kubernetes/preinstall/tasks/0020-verify-settings.yml | 6 +++--- roles/kubernetes/preinstall/tasks/0040-set_facts.yml | 8 ++++++++ 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 7ce82e686..fbd15713a 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -84,14 +84,6 @@ or is_fedora_coreos or ansible_distribution == "Fedora") -- name: Set os_family fact for Kylin 
Linux Advanced Server - set_fact: - ansible_os_family: "RedHat" - ansible_distribution_major_version: "8" - when: ansible_distribution == "Kylin Linux Advanced Server" - tags: - - facts - - name: Install ceph-commmon package package: name: diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index b450a4ef9..4705be9c6 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -103,10 +103,10 @@ - name: Ensure ping package package: name: >- - {%- if ansible_os_family in ['RedHat', 'Suse'] -%} - iputils - {%- else -%} + {%- if ansible_os_family == 'Debian' -%} iputils-ping + {%- else -%} + iputils {%- endif -%} state: present when: diff --git a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml index e6148ace1..54aa5b6dc 100644 --- a/roles/kubernetes/preinstall/tasks/0040-set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/0040-set_facts.yml @@ -6,6 +6,14 @@ tags: - facts +- name: Set os_family fact for Kylin Linux Advanced Server + set_fact: + ansible_os_family: "RedHat" + ansible_distribution_major_version: "8" + when: ansible_distribution == "Kylin Linux Advanced Server" + tags: + - facts + - name: check if booted with ostree stat: path: /run/ostree-booted From 0d6dc08578e48b31d3f935a585c40b1f25ac3d75 Mon Sep 17 00:00:00 2001 From: yanggang Date: Mon, 7 Nov 2022 10:04:16 +0800 Subject: [PATCH 24/36] upgrade argocd version 2.4.16 (#9467) --- README.md | 2 +- inventory/sample/group_vars/k8s_cluster/addons.yml | 2 +- roles/kubernetes-apps/argocd/defaults/main.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 467075fa1..1df085c47 100644 --- a/README.md +++ b/README.md @@ -158,7 +158,7 @@ Note: Upstart/SysV init based OS types are not supported. 
- [coredns](https://github.com/coredns/coredns) v1.8.6 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v1.4.0 - [krew](https://github.com/kubernetes-sigs/krew) v0.4.3 - - [argocd](https://argoproj.github.io/) v2.4.15 + - [argocd](https://argoproj.github.io/) v2.4.16 - [helm](https://helm.sh/) v3.9.4 - [metallb](https://metallb.universe.tf/) v0.12.1 - [registry](https://github.com/distribution/distribution) v2.8.1 diff --git a/inventory/sample/group_vars/k8s_cluster/addons.yml b/inventory/sample/group_vars/k8s_cluster/addons.yml index 0d58f6379..73e4dfe12 100644 --- a/inventory/sample/group_vars/k8s_cluster/addons.yml +++ b/inventory/sample/group_vars/k8s_cluster/addons.yml @@ -211,7 +211,7 @@ metallb_speaker_enabled: "{{ metallb_enabled }}" # my_asn: 4200000000 argocd_enabled: false -# argocd_version: v2.4.15 +# argocd_version: v2.4.16 # argocd_namespace: argocd # Default password: # - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli diff --git a/roles/kubernetes-apps/argocd/defaults/main.yml b/roles/kubernetes-apps/argocd/defaults/main.yml index bb8f1fe9c..f0eb120a1 100644 --- a/roles/kubernetes-apps/argocd/defaults/main.yml +++ b/roles/kubernetes-apps/argocd/defaults/main.yml @@ -1,5 +1,5 @@ --- argocd_enabled: false -argocd_version: v2.4.15 +argocd_version: v2.4.16 argocd_namespace: argocd # argocd_admin_password: From a731e2577838253afaf80fb0139c358ac85d0749 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Mon, 7 Nov 2022 17:08:16 +0900 Subject: [PATCH 25/36] Make vagrant-ubuntu20-flannel voting (#9469) We made all vagrant jobs non-voting because those jobs were not stable. However the setting allowed a pull request which broke vagrant jobs completely merged into the master branch. To avoid such situation, this makes one of vagrant jobs voting. Let's see the stability of the job. 
--- .gitlab-ci/vagrant.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci/vagrant.yml b/.gitlab-ci/vagrant.yml index 8c0f4a21d..4f7bd9e43 100644 --- a/.gitlab-ci/vagrant.yml +++ b/.gitlab-ci/vagrant.yml @@ -43,6 +43,7 @@ vagrant_ubuntu20-flannel: stage: deploy-part2 extends: .vagrant when: on_success + allow_failure: false vagrant_ubuntu16-kube-router-sep: stage: deploy-part2 From 5d1fe64bc8cc01151527b16b7d40b6b4c0b665bb Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Tue, 8 Nov 2022 00:28:17 +0100 Subject: [PATCH 26/36] Update local-volume-provisioner (#9463) - Update and re-work the documentation: - Update links - Fix formatting (especially for lists) - Remove documentation about `useAlphaApi`, a flag only for k8s versions < v1.10 - Attempt to clarify the doc - Update to version 1.5.0 - Remove PodSecurityPolicy (deprecated in k8s v1.21+) - Update ClusterRole following upstream (cf https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner/pull/292) - Add nodeSelector to DaemonSet (following upstream) --- README.md | 2 +- .../local_volume_provisioner.md | 125 ++++++++++-------- roles/download/defaults/main.yml | 2 +- .../defaults/main.yml | 5 +- .../local_volume_provisioner/tasks/main.yml | 11 -- ...ocal-volume-provisioner-clusterrole.yml.j2 | 12 ++ ...lume-provisioner-clusterrolebinding.yml.j2 | 14 -- .../local-volume-provisioner-ds.yml.j2 | 2 + .../local-volume-provisioner-psp-cr.yml.j2 | 14 -- .../local-volume-provisioner-psp-rb.yml.j2 | 13 -- .../local-volume-provisioner-psp-role.yml.j2 | 15 --- .../local-volume-provisioner-psp.yml.j2 | 45 ------- 12 files changed, 85 insertions(+), 175 deletions(-) delete mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2 delete mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2 delete mode 100644 
roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2 delete mode 100644 roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2 diff --git a/README.md b/README.md index 1df085c47..a455bd3e9 100644 --- a/README.md +++ b/README.md @@ -170,7 +170,7 @@ Note: Upstart/SysV init based OS types are not supported. - [cinder-csi-plugin](https://github.com/kubernetes/cloud-provider-openstack/blob/master/docs/cinder-csi-plugin/using-cinder-csi-plugin.md) v1.22.0 - [gcp-pd-csi-plugin](https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver) v1.4.0 - [local-path-provisioner](https://github.com/rancher/local-path-provisioner) v0.0.22 - - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.4.0 + - [local-volume-provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) v2.5.0 ## Container Runtime Notes diff --git a/docs/kubernetes-apps/local_volume_provisioner.md b/docs/kubernetes-apps/local_volume_provisioner.md index 283969def..e9c622513 100644 --- a/docs/kubernetes-apps/local_volume_provisioner.md +++ b/docs/kubernetes-apps/local_volume_provisioner.md @@ -1,10 +1,11 @@ -# Local Storage Provisioner +# Local Static Storage Provisioner -The [local storage provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume) +The [local static storage provisioner](https://github.com/kubernetes-sigs/sig-storage-local-static-provisioner) is NOT a dynamic storage provisioner as you would expect from a cloud provider. Instead, it simply creates PersistentVolumes for -all mounts under the host_dir of the specified storage class. +all mounts under the `host_dir` of the specified storage class. These storage classes are specified in the `local_volume_provisioner_storage_classes` nested dictionary. 
+ Example: ```yaml @@ -16,15 +17,18 @@ local_volume_provisioner_storage_classes: host_dir: /mnt/fast-disks mount_dir: /mnt/fast-disks block_cleaner_command: - - "/scripts/shred.sh" - - "2" + - "/scripts/shred.sh" + - "2" volume_mode: Filesystem fs_type: ext4 ``` -For each key in `local_volume_provisioner_storage_classes` a storageClass with the -same name is created. The subkeys of each storage class are converted to camelCase and added -as attributes to the storageClass. +For each key in `local_volume_provisioner_storage_classes` a "storage class" with +the same name is created in the entry `storageClassMap` of the ConfigMap `local-volume-provisioner`. +The subkeys of each storage class in `local_volume_provisioner_storage_classes` +are converted to camelCase and added as attributes to the storage class in the +ConfigMap. + The result of the above example is: ```yaml @@ -43,80 +47,85 @@ data: fsType: ext4 ``` -The default StorageClass is local-storage on /mnt/disks, -the rest of this doc will use that path as an example. +Additionally, a StorageClass object (`storageclasses.storage.k8s.io`) is also +created for each storage class: + +```bash +$ kubectl get storageclasses.storage.k8s.io +NAME PROVISIONER RECLAIMPOLICY +fast-disks kubernetes.io/no-provisioner Delete +local-storage kubernetes.io/no-provisioner Delete +``` + +The default StorageClass is `local-storage` on `/mnt/disks`; +the rest of this documentation will use that path as an example. ## Examples to create local storage volumes -1. tmpfs method: +1. Using tmpfs -``` bash -for vol in vol1 vol2 vol3; do -mkdir /mnt/disks/$vol -mount -t tmpfs -o size=5G $vol /mnt/disks/$vol -done -``` + ```bash + for vol in vol1 vol2 vol3; do + mkdir /mnt/disks/$vol + mount -t tmpfs -o size=5G $vol /mnt/disks/$vol + done + ``` -The tmpfs method is not recommended for production because the mount is not -persistent and data will be deleted on reboot. 
+ The tmpfs method is not recommended for production because the mounts are not + persistent and data will be deleted on reboot. 1. Mount physical disks -``` bash -mkdir /mnt/disks/ssd1 -mount /dev/vdb1 /mnt/disks/ssd1 -``` + ```bash + mkdir /mnt/disks/ssd1 + mount /dev/vdb1 /mnt/disks/ssd1 + ``` -Physical disks are recommended for production environments because it offers -complete isolation in terms of I/O and capacity. + Physical disks are recommended for production environments because it offers + complete isolation in terms of I/O and capacity. 1. Mount unpartitioned physical devices -``` bash -for disk in /dev/sdc /dev/sdd /dev/sde; do - ln -s $disk /mnt/disks -done -``` + ```bash + for disk in /dev/sdc /dev/sdd /dev/sde; do + ln -s $disk /mnt/disks + done + ``` -This saves time of precreating filesystems. Note that your storageclass must have -volume_mode set to "Filesystem" and fs_type defined. If either is not set, the -disk will be added as a raw block device. + This saves time of precreating filesystems. Note that your storageclass must have + `volume_mode` set to `"Filesystem"` and `fs_type` defined. If either is not set, the + disk will be added as a raw block device. + +1. PersistentVolumes with `volumeMode="Block"` + + Just like above, you can create PersistentVolumes with volumeMode `Block` + by creating a symbolic link under discovery directory to the block device on + the node, if you set `volume_mode` to `"Block"`. This will create a volume + presented into a Pod as a block device, without any filesystem on it. 1. 
File-backed sparsefile method -``` bash -truncate /mnt/disks/disk5 --size 2G -mkfs.ext4 /mnt/disks/disk5 -mkdir /mnt/disks/vol5 -mount /mnt/disks/disk5 /mnt/disks/vol5 -``` + ```bash + truncate /mnt/disks/disk5 --size 2G + mkfs.ext4 /mnt/disks/disk5 + mkdir /mnt/disks/vol5 + mount /mnt/disks/disk5 /mnt/disks/vol5 + ``` -If you have a development environment and only one disk, this is the best way -to limit the quota of persistent volumes. + If you have a development environment and only one disk, this is the best way + to limit the quota of persistent volumes. 1. Simple directories -In a development environment using `mount --bind` works also, but there is no capacity -management. - -1. Block volumeMode PVs - -Create a symbolic link under discovery directory to the block device on the node. To use -raw block devices in pods, volume_type should be set to "Block". + In a development environment, using `mount --bind` works also, but there is no capacity + management. ## Usage notes -Beta PV.NodeAffinity field is used by default. If running against an older K8s -version, the useAlphaAPI flag must be set in the configMap. - -The volume provisioner cannot calculate volume sizes correctly, so you should -delete the daemonset pod on the relevant host after creating volumes. The pod -will be recreated and read the size correctly. - -Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for -Flatcar Container Linux). Pods with persistent volume claims will not be +Make sure to make any mounts persist via `/etc/fstab` or with systemd mounts (for +Flatcar Container Linux or Fedora CoreOS). Pods with persistent volume claims will not be able to start if the mounts become unavailable. 
## Further reading -Refer to the upstream docs here: +Refer to the upstream docs here: diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 60b7217b7..ce4198cda 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -988,7 +988,7 @@ registry_image_tag: "{{ registry_version }}" metrics_server_version: "v0.6.1" metrics_server_image_repo: "{{ kube_image_repo }}/metrics-server/metrics-server" metrics_server_image_tag: "{{ metrics_server_version }}" -local_volume_provisioner_version: "v2.4.0" +local_volume_provisioner_version: "v2.5.0" local_volume_provisioner_image_repo: "{{ kube_image_repo }}/sig-storage/local-volume-provisioner" local_volume_provisioner_image_tag: "{{ local_volume_provisioner_version }}" cephfs_provisioner_version: "v2.1.0-k8s1.11" diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml index c38fcc0f8..16ed6ffab 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/defaults/main.yml @@ -6,9 +6,9 @@ local_volume_provisioner_nodelabels: [] # - topology.kubernetes.io/region # - topology.kubernetes.io/zone local_volume_provisioner_tolerations: [] -# Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted -# see https://github.com/ansible/ansible/issues/17324 local_volume_provisioner_use_node_name_only: false +# Leverages Ansible's string to Python datatype casting. Otherwise the dict_key isn't substituted. 
+# see https://github.com/ansible/ansible/issues/17324 local_volume_provisioner_storage_classes: | { "{{ local_volume_provisioner_storage_class | default('local-storage') }}": { @@ -16,6 +16,5 @@ local_volume_provisioner_storage_classes: | "mount_dir": "{{ local_volume_provisioner_mount_dir | default('/mnt/disks') }}", "volume_mode": "Filesystem", "fs_type": "ext4" - } } diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index 88a57105a..2308b5ca6 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -24,17 +24,6 @@ - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm } - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds } - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc } - local_volume_provisioner_templates_for_psp_not_system_ns: - - { name: local-volume-provisioner-psp, file: local-volume-provisioner-psp.yml, type: psp } - - { name: local-volume-provisioner-psp-role, file: local-volume-provisioner-psp-role.yml, type: role } - - { name: local-volume-provisioner-psp-rb, file: local-volume-provisioner-psp-rb.yml, type: rolebinding } - -- name: Local Volume Provisioner | Insert extra templates to Local Volume Provisioner templates list for PodSecurityPolicy - set_fact: - local_volume_provisioner_templates: "{{ local_volume_provisioner_templates[:2] + local_volume_provisioner_templates_for_psp_not_system_ns + local_volume_provisioner_templates[2:] }}" - when: - - podsecuritypolicy_enabled - - local_volume_provisioner_namespace != "kube-system" - name: Local Volume Provisioner | Create manifests template: diff --git 
a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 index e4348b123..ada55dd13 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrole.yml.j2 @@ -5,6 +5,18 @@ metadata: name: local-volume-provisioner-node-clusterrole namespace: {{ local_volume_provisioner_namespace }} rules: +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["watch"] +- apiGroups: ["", "events.k8s.io"] + resources: ["events"] + verbs: ["create", "update", "patch"] - apiGroups: [""] resources: ["nodes"] verbs: ["get"] diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 index aafd88f00..bc286b2e4 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-clusterrolebinding.yml.j2 @@ -1,20 +1,6 @@ --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding -metadata: - name: local-volume-provisioner-system-persistent-volume-provisioner - namespace: {{ local_volume_provisioner_namespace }} -subjects: -- kind: ServiceAccount - name: local-volume-provisioner - namespace: {{ 
local_volume_provisioner_namespace }} -roleRef: - kind: ClusterRole - name: system:persistent-volume-provisioner - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding metadata: name: local-volume-provisioner-system-node namespace: {{ local_volume_provisioner_namespace }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 index 245e33afb..a8747a230 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-ds.yml.j2 @@ -20,6 +20,8 @@ spec: spec: priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}} serviceAccountName: local-volume-provisioner + nodeSelector: + kubernetes.io/os: linux {% if local_volume_provisioner_tolerations %} tolerations: {{ local_volume_provisioner_tolerations | to_nice_yaml(indent=2) | indent(width=8) }} diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2 deleted file mode 100644 index 7c9b0b462..000000000 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-cr.yml.j2 +++ /dev/null @@ -1,14 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: psp:local-volume-provisioner - namespace: {{ local_volume_provisioner_namespace }} -rules: - - apiGroups: - - policy - resourceNames: - - local-volume-provisioner - resources: - - podsecuritypolicies - verbs: - - 
use diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2 deleted file mode 100644 index 385b590e6..000000000 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-rb.yml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: psp:local-volume-provisioner - namespace: {{ local_volume_provisioner_namespace }} -subjects: - - kind: ServiceAccount - name: local-volume-provisioner - namespace: {{ local_volume_provisioner_namespace }} -roleRef: - kind: ClusterRole - name: psp:local-volume-provisioner - apiGroup: rbac.authorization.k8s.io diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2 deleted file mode 100644 index 7cdd5af51..000000000 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp-role.yml.j2 +++ /dev/null @@ -1,15 +0,0 @@ ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: psp:local-volume-provisioner - namespace: {{ local_volume_provisioner_namespace }} -rules: - - apiGroups: - - policy - resourceNames: - - local-volume-provisioner - resources: - - podsecuritypolicies - verbs: - - use diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2 b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2 deleted file mode 100644 index 10b4f6e15..000000000 --- 
a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/templates/local-volume-provisioner-psp.yml.j2 +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: local-volume-provisioner - annotations: - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default' -{% if apparmor_enabled %} - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' -{% endif %} - labels: - addonmanager.kubernetes.io/mode: Reconcile -spec: - privileged: true - allowPrivilegeEscalation: true - requiredDropCapabilities: - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'secret' - - 'downwardAPI' - - 'hostPath' - allowedHostPaths: -{% for class_name, class_config in local_volume_provisioner_storage_classes.items() %} - - pathPrefix: "{{ class_config.host_dir }}" - readOnly: false -{% endfor %} - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'RunAsAny' - readOnlyRootFilesystem: false From 5c25b579896be6d904f87c7aba8887f4f57e95fc Mon Sep 17 00:00:00 2001 From: emiran-orange <71817149+emiran-orange@users.noreply.github.com> Date: Tue, 8 Nov 2022 15:44:25 +0100 Subject: [PATCH 27/36] Ability to define options for DNS upstream servers (#9311) * Ability to define options for DNS upstream servers * Doc and sample inventory vars --- docs/dns-stack.md | 6 ++++++ docs/vars.md | 1 + .../sample/group_vars/k8s_cluster/k8s-cluster.yml | 3 +++ roles/kubernetes-apps/ansible/defaults/main.yml | 4 ++++ .../ansible/templates/coredns-config.yml.j2 | 14 ++++++-------- .../ansible/templates/nodelocaldns-config.yml.j2 | 14 ++++++++++++-- 6 files changed, 32 insertions(+), 10 deletions(-) diff --git 
a/docs/dns-stack.md b/docs/dns-stack.md index 9d172b832..50d9724cc 100644 --- a/docs/dns-stack.md +++ b/docs/dns-stack.md @@ -50,6 +50,12 @@ is not set, a default resolver is chosen (depending on cloud provider or 8.8.8.8 DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode`` modes. These serve as backup DNS servers in early cluster deployment when no cluster DNS is available yet. +### dns_upstream_forward_extra_opts + +Whether or not upstream DNS servers come from `upstream_dns_servers` variable or /etc/resolv.conf, related forward block in coredns (and nodelocaldns) configuration can take options (see for details). +These are configurable in inventory in as a dictionary in the `dns_upstream_forward_extra_opts` variable. +By default, no other option than the ones hardcoded (see `roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2` and `roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2`). + ### coredns_external_zones Array of optional external zones to coredns forward queries to. It's injected into diff --git a/docs/vars.md b/docs/vars.md index f75ff0069..b3f26945d 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -169,6 +169,7 @@ variables to match your requirements. * *searchdomains* - Array of up to 4 search domains * *remove_default_searchdomains* - Boolean. If enabled, `searchdomains` variable can hold 6 search domains. * *dns_etchosts* - Content of hosts file for coredns and nodelocaldns +* *dns_upstream_forward_extra_opts* - Options to add in the forward section of coredns/nodelocaldns related to upstream DNS servers For more information, see [DNS Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.md). 
diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index 016fe7811..8b8978163 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -205,6 +205,9 @@ enable_coredns_k8s_external: false coredns_k8s_external_zone: k8s_external.local # Enable endpoint_pod_names option for kubernetes plugin enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential # Can be docker_dns, host_resolvconf or none resolvconf_mode: host_resolvconf diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 83b07080f..66b767341 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -14,6 +14,10 @@ coredns_deployment_nodeselector: "kubernetes.io/os: linux" coredns_default_zone_cache_block: | cache 30 +# dns_upstream_forward_extra_opts apply to coredns forward section as well as nodelocaldns upstream target forward section +# dns_upstream_forward_extra_opts: +# policy: sequential + # nodelocaldns nodelocaldns_cpu_requests: 100m nodelocaldns_memory_limit: 200Mi diff --git a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 index 1ee1601d4..44eea93bc 100644 --- a/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/coredns-config.yml.j2 @@ -46,17 +46,15 @@ data: {% endif %} } prometheus :9153 -{% if upstream_dns_servers is defined and upstream_dns_servers|length > 0 %} - forward . {{ upstream_dns_servers|join(' ') }} { + forward . 
{{ upstream_dns_servers|join(' ') if upstream_dns_servers is defined and upstream_dns_servers|length > 0 else '/etc/resolv.conf' }} { prefer_udp max_concurrent 1000 - } -{% else %} - forward . /etc/resolv.conf { - prefer_udp - max_concurrent 1000 - } +{% if dns_upstream_forward_extra_opts is defined %} +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} {% endif %} + } {% if enable_coredns_k8s_external %} k8s_external {{ coredns_k8s_external_zone }} {% endif %} diff --git a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 index 9ea695c48..231c8bac1 100644 --- a/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/nodelocaldns-config.yml.j2 @@ -80,7 +80,12 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ upstreamForwardTarget }} + forward . {{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_prometheus_port }} {% if dns_etchosts | default(None) %} hosts /etc/coredns/hosts { @@ -157,7 +162,12 @@ data: reload loop bind {{ nodelocaldns_ip }} - forward . {{ upstreamForwardTarget }} + forward . 
{{ upstreamForwardTarget }}{% if dns_upstream_forward_extra_opts is defined %} { +{% for optname, optvalue in dns_upstream_forward_extra_opts.items() %} + {{ optname }} {{ optvalue }} +{% endfor %} + }{% endif %} + prometheus {% if nodelocaldns_bind_metrics_host_ip %}{$MY_HOST_IP}{% endif %}:{{ nodelocaldns_secondary_prometheus_port }} {% if dns_etchosts | default(None) %} hosts /etc/coredns/hosts { From 5a8cf824f60c37dc79c3b1947bd727726424668c Mon Sep 17 00:00:00 2001 From: Ilya Margolin Date: Tue, 8 Nov 2022 15:44:32 +0100 Subject: [PATCH 28/36] [containerd] Simplify limiting number of open files per container (#9319) by setting a default runtime spec with a patch for RLIMIT_NOFILE. - Introduces containerd_base_runtime_spec_rlimit_nofile. - Generates base_runtime_spec on-the-fly, to use the containerd version of the node. --- docs/containerd.md | 17 +- .../containerd/defaults/main.yml | 13 +- .../containerd/files/cri-base.json | 214 ------------------ .../containerd/tasks/main.yml | 10 + 4 files changed, 31 insertions(+), 223 deletions(-) delete mode 100644 roles/container-engine/containerd/files/cri-base.json diff --git a/docs/containerd.md b/docs/containerd.md index 847f7c9ca..5b20e7f16 100644 --- a/docs/containerd.md +++ b/docs/containerd.md @@ -64,14 +64,17 @@ is a list of such dictionaries. Default runtime can be changed by setting `containerd_default_runtime`. -#### base_runtime_spec +#### Base runtime specs and limiting number of open files -`base_runtime_spec` key in a runtime dictionary can be used to explicitly -specify a runtime spec json file. We ship the default one which is generated -with `ctr oci spec > /etc/containerd/cri-base.json`. It will be used if you set -`base_runtime_spec: cri-base.json`. The main advantage of doing so is the presence of -`rlimits` section in this configuration, which will restrict the maximum number -of file descriptors(open files) per container to 1024. 
+`base_runtime_spec` key in a runtime dictionary is used to explicitly +specify a runtime spec json file. `runc` runtime has it set to `cri-base.json`, +which is generated with `ctr oci spec > /etc/containerd/cri-base.json` and +updated to include a custom setting for maximum number of file descriptors per +container. + +You can change maximum number of file descriptors per container for the default +`runc` runtime by setting the `containerd_base_runtime_spec_rlimit_nofile` +variable. You can tune many more [settings][runtime-spec] by supplying your own file name and content with `containerd_base_runtime_specs`: diff --git a/roles/container-engine/containerd/defaults/main.yml b/roles/container-engine/containerd/defaults/main.yml index 5f82fae59..cc630ff20 100644 --- a/roles/container-engine/containerd/defaults/main.yml +++ b/roles/container-engine/containerd/defaults/main.yml @@ -15,7 +15,7 @@ containerd_runc_runtime: type: "io.containerd.runc.v2" engine: "" root: "" - # base_runtime_spec: cri-base.json # use this to limit number of file descriptors per container + base_runtime_spec: cri-base.json options: systemdCgroup: "{{ containerd_use_systemd_cgroup | ternary('true', 'false') }}" @@ -26,8 +26,17 @@ containerd_additional_runtimes: [] # engine: "" # root: "" +containerd_base_runtime_spec_rlimit_nofile: 16384 + +containerd_default_base_runtime_spec_patch: + process: + rlimits: + - type: RLIMIT_NOFILE + hard: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + soft: "{{ containerd_base_runtime_spec_rlimit_nofile }}" + containerd_base_runtime_specs: - cri-base.json: "{{ lookup('file', 'cri-base.json') }}" + cri-base.json: "{{ containerd_default_base_runtime_spec | combine(containerd_default_base_runtime_spec_patch,recursive=1) }}" containerd_grpc_max_recv_message_size: 16777216 containerd_grpc_max_send_message_size: 16777216 diff --git a/roles/container-engine/containerd/files/cri-base.json b/roles/container-engine/containerd/files/cri-base.json deleted file mode 
100644 index f022438a4..000000000 --- a/roles/container-engine/containerd/files/cri-base.json +++ /dev/null @@ -1,214 +0,0 @@ -{ - "ociVersion": "1.0.2-dev", - "process": { - "user": { - "uid": 0, - "gid": 0 - }, - "cwd": "/", - "capabilities": { - "bounding": [ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE" - ], - "effective": [ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE" - ], - "inheritable": [ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE" - ], - "permitted": [ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE" - ] - }, - "rlimits": [ - { - "type": "RLIMIT_NOFILE", - "hard": 1024, - "soft": 1024 - } - ], - "noNewPrivileges": true - }, - "root": { - "path": "rootfs" - }, - "mounts": [ - { - "destination": "/proc", - "type": "proc", - "source": "proc", - "options": [ - "nosuid", - "noexec", - "nodev" - ] - }, - { - "destination": "/dev", - "type": "tmpfs", - "source": "tmpfs", - "options": [ - "nosuid", - "strictatime", - "mode=755", - "size=65536k" - ] - }, - { - "destination": "/dev/pts", - "type": "devpts", - "source": "devpts", - "options": [ - "nosuid", - "noexec", - "newinstance", - "ptmxmode=0666", - "mode=0620", - "gid=5" - ] - }, - { - "destination": "/dev/shm", - "type": 
"tmpfs", - "source": "shm", - "options": [ - "nosuid", - "noexec", - "nodev", - "mode=1777", - "size=65536k" - ] - }, - { - "destination": "/dev/mqueue", - "type": "mqueue", - "source": "mqueue", - "options": [ - "nosuid", - "noexec", - "nodev" - ] - }, - { - "destination": "/sys", - "type": "sysfs", - "source": "sysfs", - "options": [ - "nosuid", - "noexec", - "nodev", - "ro" - ] - }, - { - "destination": "/run", - "type": "tmpfs", - "source": "tmpfs", - "options": [ - "nosuid", - "strictatime", - "mode=755", - "size=65536k" - ] - } - ], - "linux": { - "resources": { - "devices": [ - { - "allow": false, - "access": "rwm" - } - ] - }, - "cgroupsPath": "/default", - "namespaces": [ - { - "type": "pid" - }, - { - "type": "ipc" - }, - { - "type": "uts" - }, - { - "type": "mount" - }, - { - "type": "network" - } - ], - "maskedPaths": [ - "/proc/acpi", - "/proc/asound", - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/sys/firmware", - "/proc/scsi" - ], - "readonlyPaths": [ - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger" - ] - } -} diff --git a/roles/container-engine/containerd/tasks/main.yml b/roles/container-engine/containerd/tasks/main.yml index 6bb536413..50efd4add 100644 --- a/roles/container-engine/containerd/tasks/main.yml +++ b/roles/container-engine/containerd/tasks/main.yml @@ -84,6 +84,16 @@ notify: restart containerd when: http_proxy is defined or https_proxy is defined +- name: containerd | Generate default base_runtime_spec + register: ctr_oci_spec + command: "{{ containerd_bin_dir }}/ctr oci spec" + check_mode: false + changed_when: false + +- name: containerd | Store generated default base_runtime_spec + set_fact: + containerd_default_base_runtime_spec: "{{ ctr_oci_spec.stdout | from_json }}" + - name: containerd | Write base_runtime_specs copy: content: "{{ item.value }}" From 8a654b6955714e67e53e703758c8738605884ffd Mon Sep 17 00:00:00 
2001 From: ERIK Date: Wed, 9 Nov 2022 09:46:13 +0800 Subject: [PATCH 29/36] Add cni bin when installing calico (#9367) Signed-off-by: bo.jiang Signed-off-by: bo.jiang --- roles/network_plugin/meta/main.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index 49143874d..cb013fcca 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -1,5 +1,7 @@ --- dependencies: + - role: network_plugin/cni + - role: network_plugin/cilium when: kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool tags: @@ -25,11 +27,6 @@ dependencies: tags: - canal - - role: network_plugin/cni - when: kube_network_plugin in ['cni', 'cloud'] - tags: - - cni - - role: network_plugin/macvlan when: kube_network_plugin == 'macvlan' tags: From 7fe0b87d838bc3c1d7d298ad41ddd692d9674ddb Mon Sep 17 00:00:00 2001 From: Ilya Margolin Date: Wed, 9 Nov 2022 13:46:12 +0100 Subject: [PATCH 30/36] Fix docs for node_labels (#9471) --- docs/vars.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/vars.md b/docs/vars.md index b3f26945d..7680ab2b5 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -218,7 +218,7 @@ Stack](https://github.com/kubernetes-sigs/kubespray/blob/master/docs/dns-stack.m By default the `kubelet_secure_addresses` is set with the `10.0.0.110` the ansible control host uses `eth0` to connect to the machine. In case you want to use `eth1` as the outgoing interface on which `kube-apiserver` connects to the `kubelet`s, you should override the variable in this way: `kubelet_secure_addresses: "192.168.1.110"`. -* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter. +* *node_labels* - Labels applied to nodes via `kubectl label node`. For example, labels can be set in the inventory as variables or more widely in group_vars. 
*node_labels* can only be defined as a dict: From 6ca89c80af8be78bb8db7f6c89f47fbdf269e484 Mon Sep 17 00:00:00 2001 From: cleverhu Date: Thu, 10 Nov 2022 21:42:55 +0800 Subject: [PATCH 31/36] fix error link kubernetes url (#9475) Signed-off-by: cleverhu Signed-off-by: cleverhu --- docs/setting-up-your-first-cluster.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/setting-up-your-first-cluster.md b/docs/setting-up-your-first-cluster.md index fdd0f58c4..03622da6c 100644 --- a/docs/setting-up-your-first-cluster.md +++ b/docs/setting-up-your-first-cluster.md @@ -466,7 +466,7 @@ kubectl logs $POD_NAME #### Exec -In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container). +In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug/debug-application/get-shell-running-container/#running-individual-commands-in-a-container). 
Print the nginx version by executing the `nginx -v` command in the `nginx` container: From df6da5219503ca9dbb7d3b67c55502df0c1e940a Mon Sep 17 00:00:00 2001 From: emiran-orange <71817149+emiran-orange@users.noreply.github.com> Date: Fri, 11 Nov 2022 04:58:09 +0100 Subject: [PATCH 32/36] Enable check mode in DNS Cleanup tasks (#9472) --- roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml index d4530c1b0..fef52461c 100644 --- a/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml +++ b/roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml @@ -3,6 +3,7 @@ command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'" register: createdby_annotation_deploy changed_when: false + check_mode: false ignore_errors: true # noqa ignore-errors when: - dns_mode in ['coredns', 'coredns_dual'] @@ -12,6 +13,7 @@ command: "{{ kubectl }} get svc -n kube-system coredns -o jsonpath='{ .metadata.annotations.createdby }'" register: createdby_annotation_svc changed_when: false + check_mode: false ignore_errors: true # noqa ignore-errors when: - dns_mode in ['coredns', 'coredns_dual'] From 9439487219d1ae806466e11c36aa9255265c9e41 Mon Sep 17 00:00:00 2001 From: yanggang Date: Fri, 11 Nov 2022 12:00:09 +0800 Subject: [PATCH 33/36] Add hashes for 1.25.4, 1.24.8, 1.23.14 and make v1.25.4 default (#9479) Signed-off-by: yanggang Signed-off-by: yanggang --- README.md | 2 +- .../group_vars/k8s_cluster/k8s-cluster.yml | 2 +- roles/download/defaults/main.yml | 36 +++++++++++++++++++ roles/kubespray-defaults/defaults/main.yaml | 2 +- 4 files changed, 39 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a455bd3e9..31095f15f 100644 --- a/README.md +++ b/README.md @@ -137,7 +137,7 @@ Note: Upstart/SysV init based OS types are not supported. 
## Supported Components - Core - - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.3 + - [kubernetes](https://github.com/kubernetes/kubernetes) v1.25.4 - [etcd](https://github.com/etcd-io/etcd) v3.5.5 - [docker](https://www.docker.com/) v20.10 (see note) - [containerd](https://containerd.io/) v1.6.9 diff --git a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml index 8b8978163..26c94bd8e 100644 --- a/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml +++ b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml @@ -17,7 +17,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens" kube_api_anonymous_auth: true ## Change this to use another Kubernetes version, e.g. a current beta release -kube_version: v1.25.3 +kube_version: v1.25.4 # Where the binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index ce4198cda..62ee8f2d4 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -216,10 +216,12 @@ crio_archive_checksums: # Kubernetes versions above Kubespray's current target version are untested and should be used with caution. 
kubelet_checksums: arm: + v1.25.4: 1af9c17daa07c215a8ce40f7e65896279276e11b6f7a7d9ae850a0561e149ad8 v1.25.3: 9745a48340ca61b00f0094e4b8ff210839edcf05420f0d57b3cb1748cb887060 v1.25.2: 995f885543fa61a08bd4f1008ba6d7417a1c45bd2a8e0f70c67a83e53b46eea5 v1.25.1: 6fe430ad91e1ed50cf5cc396aa204fda0889c36b8a3b84619d633cd9a6a146e2 v1.25.0: ad45ac3216aa186648fd034dec30a00c1a2d2d1187cab8aae21aa441a13b4faa + v1.24.8: 0756748c89293e2c502ffcf7a275c3bb98a7b919d59130e5e0376c8afb327fe2 v1.24.7: 3841e80f54ee5576928e799e4962231261bcdafe94868a310a8782da9a321da5 v1.24.6: 084e469d1d3b60363e5e20812ee0d909daa5496f3e6ebd305d1f23d1fe0709d4 v1.24.5: ce55155d1aff0c72effee19c6bef534c2b7d1b23ec701d70335d181bd2d12a87 @@ -228,6 +230,7 @@ kubelet_checksums: v1.24.2: e484fb000dcfdcf7baca79451745e29764747a27d36f3fc1dda5815b9cbc9b22 v1.24.1: 393d130a1715205a253b2f70dbd1f00d1a52ab89b4f3684ed116a937e68116ec v1.24.0: fd19ff957c73e5397f9af931c82bdb95791e47dc7d3135d38720ecda211758a3 + v1.23.14: ddbb9930e232b51b2f3bbe6f944b96642cfb120f4fdd1820128fb842a454a947 v1.23.13: 58f744247dbc8bca50b01ec1c25b0b5868736319f9cc8bf964fc2c1dd9eef0f9 v1.23.12: 5b7c38206ba3c04cd756062b74093548ac6309dc086c2893351b1c479f5415a3 v1.23.11: 93bbe3a130dcd7d5732e8b949f13ba8728bb37d3d4bd58408f99352cf484f9d0 @@ -243,10 +246,12 @@ kubelet_checksums: v1.23.1: 29868f172ef171ae990deafcdc13af7fe5b00f0a546ae81c267c4ad01231c3ce v1.23.0: 7417fc7cd624a85887f0a28054f58f7534143579fe85285d0b68c8984c95f2ba arm64: + v1.25.4: 8ff80a12381fad2e96c9cec6712591018c830cdd327fc7bd825237aa51a6ada3 v1.25.3: 929d25fc3f901749b058141a9c624ff379759869e09df49b75657c0be3141091 v1.25.2: c9348c0bae1d723a39235fc041053d9453be6b517082f066b3a089c3edbdd2ae v1.25.1: b6baa99b99ecc1f358660208a9a27b64c65f3314ff95a84c73091b51ac98484b v1.25.0: 69572a7b3d179d4a479aa2e0f90e2f091d8d84ef33a35422fc89975dc137a590 + v1.24.8: 4e1427651e4ff3927f96ce4b93c471ccc76c683fc1619ee0d677d77345b54edb v1.24.7: d8bd38e595ca061c53d3b7d1daebe5b3cc1ad44c731666bd5e842d336077db4b v1.24.6: 
2a7b8e131d6823462e38bc1514b5dea5dca86254b3a12ed4a0fa653c2e06dd0e v1.24.5: dd5dcea80828979981654ec0732b197be252a3259a527cbc299d9575bc2de3e8 @@ -255,6 +260,7 @@ kubelet_checksums: v1.24.2: 40a8460e104fbf97abee9763f6e1f2143debc46cc6c9a1a18e21c1ff9960d8c0 v1.24.1: c2189c6956afda0f6002839f9f14a9b48c89dcc0228701e84856be36a3aac6bf v1.24.0: 8f066c9a048dd1704bf22ccf6e994e2fa2ea1175c9768a786f6cb6608765025e + v1.23.14: 80cdff15398c8215bb7337efdee25b40c862befbdf7925f6a8aca71bc9a79eae v1.23.13: 4e2297c9893d425bfcd80741b95fb1a5b59b4fd4f4bcf782ccab94760e653cdf v1.23.12: b802f12c79a9797f83a366c617144d019d2994fc724c75f642a9d031ce6a3488 v1.23.11: ce4f568c3193e8e0895062f783980da89adb6b54a399c797656a3ce172ddb2fc @@ -270,10 +276,12 @@ kubelet_checksums: v1.23.1: c24e4ab211507a39141d227595610383f7c5686cae3795b7d75eebbce8606f3d v1.23.0: a546fb7ccce69c4163e4a0b19a31f30ea039b4e4560c23fd6e3016e2b2dfd0d9 amd64: + v1.25.4: 7f7437e361f829967ee02e30026d7e85219693432ac5e930cc98dd9c7ddb2fac v1.25.3: d5c89c5e5dae6afa5f06a3e0e653ac3b93fa9a93c775a715531269ec91a54abe v1.25.2: 631e31b3ec648f920292fdc1bde46053cca5d5c71d622678d86907d556efaea3 v1.25.1: 63e38bcbc4437ce10227695f8722371ec0d178067f1031d09fe1f59b6fcf214a v1.25.0: 7f9183fce12606818612ce80b6c09757452c4fb50aefea5fc5843951c5020e24 + v1.24.8: 2da0b93857cf352bff5d1eb42e34d398a5971b63a53d8687b45179a78540d6d6 v1.24.7: 4d24c97c924c40971412cc497145ad823e4b7b87ccda97ebced375f7e886e9e2 v1.24.6: f8b606f542327128e404d2e66a72a40dc2ddb4175fb8e93c55effeacea60921b v1.24.5: 2448debe26e90341b038d7ccfcd55942c76ef3d9db48e42ceae5e8de3fbad631 @@ -282,6 +290,7 @@ kubelet_checksums: v1.24.2: 13da57d32be1debad3d8923e481f30aaa46bca7030b7e748b099d403b30e5343 v1.24.1: fc352d5c983b0ccf47acd8816eb826d781f408d27263dd8f761dfb63e69abfde v1.24.0: 3d98ac8b4fb8dc99f9952226f2565951cc366c442656a889facc5b1b2ec2ba52 + v1.23.14: f2bef00508790f632d035a6cfdd31539115611bfc93c5a3266ceb95bb2f27b76 v1.23.13: 4d8f796b82dbe2b89b6d587bfeedf66724526b211c75a53456d4ac4014e3dcca v1.23.12: 
98ffa8a736d3e43debb1aa61ae71dea3671989cde5e9e44c6ee51a3d47c63614 v1.23.11: b0e6d413f9b4cf1007fcb9f0ea6460ed5273a50c945ae475c224036b0ab817f7 @@ -297,10 +306,12 @@ kubelet_checksums: v1.23.1: 7ff47abf62096a41005d18c6d482cf73f26b613854173327fa9f2b98720804d4 v1.23.0: 4756ff345dd80704b749d87efb8eb294a143a1f4a251ec586197d26ad20ea518 ppc64le: + v1.25.4: 3d4806fae6f39f091ea3d9fb195aa6d3e1ef779f56e485b6afbb328c25e15bdc v1.25.3: 447a8b34646936bede22c93ca85f0a98210c9f61d6963a7d71f7f6a5152af1d1 v1.25.2: a45dc00ac3a8074c3e9ec6a45b63c0a654529a657d929f28bd79c550a0d213d7 v1.25.1: c1e3373ac088e934635fb13004a21ada39350033bfa0e4b258c114cb86b69138 v1.25.0: 8015f88d1364cf77436c157de8a5d3ab87f1cb2dfaa9289b097c92a808845491 + v1.24.8: 58ee62ed2fd4858d308ba672183ea0704555d977892510042fc2108da54cb93c v1.24.7: 621ce04d0cb1c66065303d062bf9ac248225b8428b1adbca3f6fa6dd2eda13cc v1.24.6: ea9068c28a0107f5e1317ef8ba3a23965d95ee57db6fa71ee27433cdaa0fe33c v1.24.5: 56844b2594212e81d7cd4470f81da5d0f79876f044ee6d1707166fe76fdcb03a @@ -309,6 +320,7 @@ kubelet_checksums: v1.24.2: 43e9354dfc46b6d3579a6c9a3e49a2f079fec8e63c3ed998143ab2f05790d132 v1.24.1: c59319571efe34ad9bcc4edfe89f5e324d9026d1c3182d86cadc00cfc77f7a06 v1.24.0: d41d62f6aeff9f8f9b1a1390ed2b17994952966741d6675af8410799bca38931 + v1.23.14: 2d71172abd71f3b1b3a8361c5cc55ec89b031052b2f91d64133b278e2b894a91 v1.23.13: 444c646dc94dd7f7541a91ddc16a0da7259e345e1f84ec648077f447626844a2 v1.23.12: e14a9dd3e3615e781d1de9000b250267eddfbab5ba46432ad2aa9108a5992e6a v1.23.11: 64b02bc0f17b9df2b7ca8006d6cb6c1345f32fe6e748fcb6cbe9c4b406b116f6 @@ -325,10 +337,12 @@ kubelet_checksums: v1.23.0: 25c841e08ab2655486813287aa97cadf7524277040599e95c32ed9f206308753 kubectl_checksums: arm: + v1.25.4: 49ab7f05bb27a710575c2d77982cbfb4a09247ec94a8e21af28a6e300b698a44 v1.25.3: 59e1dba0951f19d4d18eb04db50fcd437c1d57460f2008bc03e668f71b8ea685 v1.25.2: d6b581a41b010ef86a9364102f8612d2ee7fbc7dd2036e40ab7c85adb52331cb v1.25.1: 
e8c6bfd8797e42501d14c7d75201324630f15436f712c4f7e46ce8c8067d9adc v1.25.0: 0b907cfdcabafae7d2d4ac7de55e3ef814df999acdf6b1bd0ecf6abbef7c7131 + v1.24.8: b74c8ac75804fd35a14fab7f637acaf5c0cf94dfd0f5ce8d755104b1a1b2e43b v1.24.7: 1829c5bb2ef30df6e46f99aa5c87a0f510a809f9169c725b3da08455bcf7f258 v1.24.6: 7ca8fd7f5d6262668c20e3e639759e1976590ed4bd4fece62861dd376c2168de v1.24.5: 3ca0fcb90b715f0c13eafe15c9100495a8648d459f1281f3340875d1b0b7e78f @@ -337,6 +351,7 @@ kubectl_checksums: v1.24.2: c342216e1d32c28953e13f28ced387feda675b969a196ed69eaeda137fa7486a v1.24.1: 42e880ff20a55e8ec49187d54e2c1367226d220a0a6a1797e7fbf97426762f4f v1.24.0: 410fc0b3c718f8f431fe4f7d5820bf8133b16ffb76187a53fa90929a77a38cbc + v1.23.14: 071f390f560320c4caff188d8f6f21c1b3258dfed600184f39d054d1d0673f69 v1.23.13: c32baf45ad141f967b4877c7151aeee1ae296eebdbcb7a5200d418bd77c284b2 v1.23.12: 94e946dcd1c2f7c8c9e3e022202762a36dab604b861b50bdcbdfb2c719731bd9 v1.23.11: 6eaffb8f64929e888137366cf2aa7fd1df2cf851de4f96f62fe70ed4d79f0ef7 @@ -352,10 +367,12 @@ kubectl_checksums: v1.23.1: 52001ed48e9e1c8b8623f3e6b0242111227721e5ddd08fa18046c65c406e35a5 v1.23.0: 6152216d88fa4d32da58c67f78b63b3b99bf4d4d726ffb9fb74ea698dccc8644 arm64: + v1.25.4: a8e9cd3c6ca80b67091fc41bc7fe8e9f246835925c835823a08a20ed9bcea1ba v1.25.3: cfd5092ce347a69fe49c93681a164d9a8376d69eef587da894207c62ec7d6a5d v1.25.2: b26aa656194545699471278ad899a90b1ea9408d35f6c65e3a46831b9c063fd5 v1.25.1: 73602eabf20b877f88642fafcbe1eda439162c2c1dbcc9ed09fdd4d7ac9919ea v1.25.0: 24db547bbae294c5c44f2b4a777e45f0e2f3d6295eace0d0c4be2b2dfa45330d + v1.24.8: b8ac2abfcb1fa04695d18098558ff483ec2c2488877b5abc4035a543544cdcb1 v1.24.7: 4b138a11b13210ce1731e06918f8fff6709c004c6fb6bec28544713854de9fe8 v1.24.6: 2f62e55960b02bb63cbc9154141520ac7cf0c2d55b45dd4a72867971e24a7219 v1.24.5: a5e348758c0f2b22adeb1b663b4b66781bded895d8ea2a714eb1de81fb00907a @@ -364,6 +381,7 @@ kubectl_checksums: v1.24.2: 5a4c3652f08b4d095b686e1323ac246edbd8b6e5edd5a2626fb71afbcd89bc79 v1.24.1: 
b817b54183e089494f8b925096e9b65af3a356d87f94b73929bf5a6028a06271 v1.24.0: 449278789de283648e4076ade46816da249714f96e71567e035e9d17e1fff06d + v1.23.14: 857716aa5cd24500349e5de8238060845af34b91ac4683bd279988ad3e1d3efa v1.23.13: 950626ae35fca6c26096f97cac839d76e2f29616048ad30cec68f1ff003840f2 v1.23.12: 88ebbc41252b39d49ce574a5a2bb25943bb82e55a252c27fe4fc096ce2dbb437 v1.23.11: 9416cc7abaf03eb83f854a45a41986bf4e1232d129d7caafc3101a01ca11b0e3 @@ -379,10 +397,12 @@ kubectl_checksums: v1.23.1: c0c24c7f6a974390e15148a575c84878e925f32328ff96ae173ec762678e4524 v1.23.0: 1d77d6027fc8dfed772609ad9bd68f611b7e4ce73afa949f27084ad3a92b15fe amd64: + v1.25.4: e4e569249798a09f37e31b8b33571970fcfbdecdd99b1b81108adc93ca74b522 v1.25.3: f57e568495c377407485d3eadc27cda25310694ef4ffc480eeea81dea2b60624 v1.25.2: 8639f2b9c33d38910d706171ce3d25be9b19fc139d0e3d4627f38ce84f9040eb v1.25.1: 9cc2d6ce59740b6acf6d5d4a04d4a7d839b0a81373248ef0ce6c8d707143435b v1.25.0: e23cc7092218c95c22d8ee36fb9499194a36ac5b5349ca476886b7edc0203885 + v1.24.8: f93c18751ec715b4d4437e7ece18fe91948c71be1f24ab02a2dde150f5449855 v1.24.7: 2d88e56d668b1d7575b4783f22d512e94da432f42467c3aeac8a300b6345f12d v1.24.6: 3ba7e61aecb19eadfa5de1c648af1bc66f5980526645d9dfe682d77fc313b74c v1.24.5: 3037f2ec62956e7146fc86defb052d8d3b28e2daa199d7e3ff06d1e06a6286ed @@ -391,6 +411,7 @@ kubectl_checksums: v1.24.2: f15fb430afd79f79ef7cf94a4e402cd212f02d8ec5a5e6a7ba9c3d5a2f954542 v1.24.1: 0ec3c2dbafc6dd27fc8ad25fa27fc527b5d7356d1830c0efbb8adcf975d9e84a v1.24.0: 94d686bb6772f6fb59e3a32beff908ab406b79acdfb2427abdc4ac3ce1bb98d7 + v1.23.14: 13ce4b18ba6e15d5d259249c530637dd7fb9722d121df022099f3ed5f2bd74cd v1.23.13: fae6957e6a7047ad49cdd20976cd2ce9188b502c831fbf61f36618ea1188ba38 v1.23.12: b150c7c4830cc3be4bedd8998bf36a92975c95cd1967b4ef2d1edda080ffe5d9 v1.23.11: cf04ad2fa1cf118a951d690af0afbbe8f5fc4f02c721c848080d466e6159111e @@ -406,10 +427,12 @@ kubectl_checksums: v1.23.1: 156fd5e7ebbedf3c482fd274089ad75a448b04cf42bc53f370e4e4ea628f705e v1.23.0: 
2d0f5ba6faa787878b642c151ccb2c3390ce4c1e6c8e2b59568b3869ba407c4f ppc64le: + v1.25.4: 23f5cec67088fa0c3efc17110ede5f6120d3ad18ad6b996846642c2f46b43da0 v1.25.3: bd59ac682fffa37806f768328fee3cb791772c4a12bcb155cc64b5c81b6c47ce v1.25.2: 1e3665de15a591d52943e6417f3102b5d413bc1d86009801ad0def04e8c920c5 v1.25.1: 957170066abc4d4c178ac8d84263a191d351e98978b86b0916c1b8c061da8282 v1.25.0: dffe15c626d7921d77e85f390b15f13ebc3a9699785f6b210cd13fa6f4653513 + v1.24.8: 9ed85938808b6ae52a2d0b5523dc3122a7dcf8857d609b7d79a1733c72344dc1 v1.24.7: a68ec0c8ed579324037fc0a3bafa9d10184e6ff3ca34bfffdcb78f9f02bcb765 v1.24.6: 448009693a97428aec7e60cc117079724f890e3a46d0aa54accdb56f33ca0f3d v1.24.5: 0861df1c77336fbe569887a884d62a24fcb6486d43798a8767dba7e5865c3c98 @@ -418,6 +441,7 @@ kubectl_checksums: v1.24.2: cacf9b4a539853158b885c39fa714710767aa6c12804fccb7de6b037228b811f v1.24.1: 8812543e6c34101d37ad9d7a7edb91621db0fe992b16bd9beb8e5ddb4c7792c5 v1.24.0: 153a1ca1593ef4cb56b16922f8e229986a621d396112f0cfad6fa568ad00fa75 + v1.23.14: 291127abe519e4a1c0193960d361ba5a58c21cddb4cfff8ae4e67c001671849d v1.23.13: 785d620dc77d10ce49218894225e935e55d08bb3842ae75c11cb41a814aca9ea v1.23.12: f9a8efede8872c23c54c44f09657fa522e99786f3dc73ba7d6d928e9b3c7dc1a v1.23.11: 52556d4e8ba19e8b0a65e4ac70203922b42b054647ec59a0177a2c4f61b903e7 @@ -434,10 +458,12 @@ kubectl_checksums: v1.23.0: e96f2b16d8a10fe6531dfac9143efa4960432cf2ae8b26ffd174fa00eb28a851 kubeadm_checksums: arm: + v1.25.4: a20379513e5d91073a52a0a3e7a9201e2d7b23daa55d68456465d8c9ef69427c v1.25.3: 3f357e1e57936ec7812d35681be249b079bbdc1c7f13a75e6159379398e37d5e v1.25.2: 2f794569c3322bb66309c7f67126b7f88155dfb1f70eea789bec0edf4e10015e v1.25.1: ecb7a459ca23dfe527f4eedf33fdb0df3d55519481a8be3f04a5c3a4d41fa588 v1.25.0: 67b6b58cb6abd5a4c9024aeaca103f999077ce6ec8e2ca13ced737f5139ad2f0 + v1.24.8: 5117a0f3b652950bee328ee9583504fe50c012290436e56f6f4b9d7219ad2591 v1.24.7: c0a9e6c08cad0b727f06bb3b539d55c65ea977be68fe471f6a9f73af3fbcb275 v1.24.6: 
760f0fc195f00ca3d1612e0974461ab937c25aa1e7a2f8d2357cd1336b2ecf3a v1.24.5: 973f1ad7da9216fe3e0319a0c4fcb519a21a773cd39a0a445e689bea3d4a27c7 @@ -446,6 +472,7 @@ kubeadm_checksums: v1.24.2: d4bead61c1ba03113281ab96b21530b32e96eea24220bd2aebe1abdec739c266 v1.24.1: 1c0b22c941badb40f4fb93e619b4a1c5e4bba7c1c7313f7c7e87d77150f35153 v1.24.0: c463bf24981dea705f4ee6e547abd5cc3b3e499843f836aae1a04f5b80abf4c2 + v1.23.14: de222c7f05e90ae263b988e191a1b907c593c4ddc363277dae24d91ba694c731 v1.23.13: 54d0f4d7a65abf610606b0538005ab5f177566587a81af6b0bc24ded2f8e305c v1.23.12: 6da38118a7a1570ad76389f0492c11f8ae8e2068395773b89a2b0442d02e604c v1.23.11: 4ea0f63d245d01eccc5c3f2c849e2c799392d5e37c9bc4c0ec7a06a5d3722622 @@ -461,10 +488,12 @@ kubeadm_checksums: v1.23.1: 77baac1659f7f474ba066ef8ca67a86accc4e40d117e73c6c76a2e62689d8369 v1.23.0: b59790cdce297ac0937cc9ce0599979c40bc03601642b467707014686998dbda arm64: + v1.25.4: 3f5b273e8852d13fa39892a30cf64928465c32d0eb741118ba89714b51f03cd5 v1.25.3: 61bb61eceff78b44be62a12bce7c62fb232ce1338928e4207deeb144f82f1d06 v1.25.2: 437dc97b0ca25b3fa8d74b39e4059a77397b55c1a6d16bddfd5a889d91490ce0 v1.25.1: f4d57d89c53b7fb3fe347c9272ed40ec55eab120f4f09cd6b684e97cb9cbf1f0 v1.25.0: 07d9c6ffd3676502acd323c0ca92f44328a1f0e89a7d42a664099fd3016cf16b + v1.24.8: 6f35562001e859f2a76a89c0da61f09433cc6628ccbc3992e82a977e0e348870 v1.24.7: ee946d82173b63f69be9075e218250d4ab1deec39d17d600b16b6743e5dca289 v1.24.6: 211b8d1881468bb673b26036dbcfa4b12877587b0a6260ffd55fd87c2aee6e41 v1.24.5: a68c6dd24ef47825bb34a2ad430d76e6b4d3cbe92187363676993d0538013ac2 @@ -473,6 +502,7 @@ kubeadm_checksums: v1.24.2: bd823b934d1445a020f8df5fe544722175024af62adbf6eb27dc7250d5db0548 v1.24.1: 04f18fe097351cd16dc91cd3bde979201916686c6f4e1b87bae69ab4479fda04 v1.24.0: 3e0fa21b8ebce04ca919fdfea7cc756e5f645166b95d6e4b5d9912d7721f9004 + v1.23.14: 7c21c1fa6a852b10ddea7bd1797ce8b4498d6898014d17d20748307e510a0826 v1.23.13: 462971d5822c91598754dfaa9c4c8d46a8c74aefef0f4dbbc8be31c4f0d18855 v1.23.12: 
d05f6765a65f7541d07aad989ee80cd730c395f042afbe0526f667ea1a0b2947 v1.23.11: 329d9aa9461baf4a7b7225e664ec1ecd61512b937e1f160f9a303bc0f0d44bbb @@ -488,10 +518,12 @@ kubeadm_checksums: v1.23.1: eb865da197f4595dec21e6fb1fa1751ef25ac66b64fa77fd4411bbee33352a40 v1.23.0: 989d117128dcaa923b2c7a917a03f4836c1b023fe1ee723541e0e39b068b93a6 amd64: + v1.25.4: b8a6119d2a3a7c6add43dcf8f920436bf7fe71a77a086e96e40aa9d6f70be826 v1.25.3: 01b59ce429263c62b85d2db18f0ccdef076b866962ed63971ff2bd2864deea7b v1.25.2: 63ee3de0c386c6f3c155874b46b07707cc72ce5b9e23f336befd0b829c1bd2ad v1.25.1: adaa1e65c1cf9267a01e889d4c13884f883cf27948f00abb823f10486f1a8420 v1.25.0: 10b30b87af2cdc865983d742891eba467d038f94f3926bf5d0174f1abf6628f8 + v1.24.8: 9fea42b4fb5eb2da638d20710ebb791dde221e6477793d3de70134ac058c4cc7 v1.24.7: 8b67319d28bf37e8e7c224954dc778cbe946f2bb0ed86975d8caa83d51c955ee v1.24.6: 7f4443fd42e0e03f6fd0c7218ca7e2634c9255d5f9d7c581fe362e19098aec4c v1.24.5: 3b9c1844ec0fc3c94015d63470b073a7b219082b6a6424c6b0da9cf97e234aeb @@ -500,6 +532,7 @@ kubeadm_checksums: v1.24.2: 028f73b8e7c2ae389817d34e0cb829a814ce2fac0a535a3aa0708f3133e3e712 v1.24.1: 15e3193eecbc69330ada3f340c5a47999959bc227c735fa95e4aa79470c085d0 v1.24.0: 5e58a29eaaf69ea80e90d9780d2a2d5f189fd74f94ec3bec9e3823d472277318 + v1.23.14: 46c847e2699839b9ccf6673f0b946c4778a3a2e8e463d15854ba30d3f0cbd87a v1.23.13: ff86af2b5fa979234dd3f9e7b04ec7d3017239a58417397153726d8077c4ac89 v1.23.12: bf45d00062688d21ff479bf126e1259d0ce3dee1c5c2fcd803f57497cd5e9e83 v1.23.11: 2f10bd298a694d3133ea19192b796a106c282441e4148c114c39376042097692 @@ -515,10 +548,12 @@ kubeadm_checksums: v1.23.1: 4d5766cb90050ee84e15df5e09148072da2829492fdb324521c4fa6d74d3aa34 v1.23.0: e21269a058d4ad421cf5818d4c7825991b8ba51cd06286932a33b21293b071b0 ppc64le: + v1.25.4: 9703e40cb0df48052c3cfb0afc85dc582e600558ab687d6409f40c382f147976 v1.25.3: 8fe9a69db91c779a8f29b216134508ba49f999fa1e36b295b99444f31266da17 v1.25.2: a53101ed297299bcf1c4f44ec67ff1cb489ab2d75526d8be10c3068f161601a7 v1.25.1: 
c7e2c8d2b852e1b30894b64875191ce388a3a416d41311b21f2d8594872fe944 v1.25.0: 31bc72e892f3a6eb5db78003d6b6200ba56da46a746455991cb422877afc153d + v1.24.8: eccd3fd892b253a8632f3c4a917c19fff4982dd436f8f7de94868a0062c0bf2b v1.24.7: 29a53be9a74dcb01ea68b0a385bdd9b510f9792955f9f7c93ed608c851b5dc32 v1.24.6: 9d73bfde24ee9781fcca712658f297a041408b534f875f5e093222ed64c91c15 v1.24.5: f416c45ca5826ea3ff13be393911424a0fba3aa30b5557d3d32541551566142a @@ -527,6 +562,7 @@ kubeadm_checksums: v1.24.2: 452922d2ec9bfa5e085a879174d1d99adb6212598f3c8ffe15b5e7c3a4e128bb v1.24.1: 74e84b4e6f2c328a169dab33956bc076a2c1670c638764b9163b1080dcb68137 v1.24.0: 286de74330365bf660d480297a7aba165a956f6fbb98acd11df2f672e21d7b5c + v1.23.14: 529811ef359095fe33a1d94d20fca312c25a1513baf799513c47711d34bd73ad v1.23.13: 3dbf72fdfc108bf41cab151ac340b336ba17b14fa008b15d84ce223b30391914 v1.23.12: ccae0a4c81a60e50219954393432c5f4d4692847c866ca497a48a1118f417d0d v1.23.11: 9930cfb4ae7663f145c1d08e06c49ab60e28a6613ac5c7b19d047f15c1e24c22 diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 4cbe0b2c4..2e0fa3b96 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -15,7 +15,7 @@ is_fedora_coreos: false disable_swap: true ## Change this to use another Kubernetes version, e.g. 
a current beta release -kube_version: v1.25.3 +kube_version: v1.25.4 ## The minimum version working kube_version_min_required: v1.23.0 From f007c776410433f750bfd62d52e6d10ca5fbd1b8 Mon Sep 17 00:00:00 2001 From: Mohamed Zaian Date: Sat, 12 Nov 2022 12:39:56 +0100 Subject: [PATCH 34/36] [etcd] make etcd 3.5.5 default for k8s 1.23 , 1.24 (#9482) --- roles/download/defaults/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 62ee8f2d4..720227812 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -134,8 +134,8 @@ kube_major_version: "{{ kube_version | regex_replace('^v([0-9])+\\.([0-9]+)\\.[0 etcd_supported_versions: v1.25: "v3.5.5" - v1.24: "v3.5.4" - v1.23: "v3.5.3" + v1.24: "v3.5.5" + v1.23: "v3.5.5" etcd_version: "{{ etcd_supported_versions[kube_major_version] }}" crictl_supported_versions: From fdbcce3a5e56d931030f0348616424f9d440072e Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Mon, 14 Nov 2022 11:23:57 +0900 Subject: [PATCH 35/36] Update offline-environment.md (#9481) This makes it more readable by explaining clearly what files are necessary to be downloaded in advance from an online environment. --- docs/offline-environment.md | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/docs/offline-environment.md b/docs/offline-environment.md index 063ed74a3..b76134cd6 100644 --- a/docs/offline-environment.md +++ b/docs/offline-environment.md @@ -1,12 +1,25 @@ # Offline environment -In case your servers don't have access to internet (for example when deploying on premises with security constraints), you need to setup: +In case your servers don't have access to the internet directly (for example +when deploying on premises with security constraints), you need to get the +following artifacts in advance from another environment that has access to the internet.
+ +* Some static files (zips and binaries) +* OS packages (rpm/deb files) +* Container images used by Kubespray. Exhaustive list depends on your setup +* [Optional] Python packages used by Kubespray (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`) +* [Optional] Helm chart files (only required if `helm_enabled=true`) + +Then you need to setup the following services on your offline environment: * a HTTP reverse proxy/cache/mirror to serve some static files (zips and binaries) * an internal Yum/Deb repository for OS packages -* an internal container image registry that need to be populated with all container images used by Kubespray. Exhaustive list depends on your setup -* [Optional] an internal PyPi server for kubespray python packages (only required if your OS doesn't provide all python packages/versions listed in `requirements.txt`) -* [Optional] an internal Helm registry (only required if `helm_enabled=true`) +* an internal container image registry that needs to be populated with all container images used by Kubespray +* [Optional] an internal PyPi server for python packages used by Kubespray +* [Optional] an internal Helm registry for Helm chart files + +You can get artifact lists with [generate_list.sh](contrib/offline/generate_list.sh) script. +In addition, you can find some tools for offline deployment under [contrib/offline](contrib/offline/README.md). ## Configure Inventory From ddbe9956e41e3b7732c45cf1774ad5e008f1c0e2 Mon Sep 17 00:00:00 2001 From: Kenichi Omichi Date: Mon, 14 Nov 2022 18:27:57 +0900 Subject: [PATCH 36/36] Fix paths of offline tool on the doc (#9486) If clicking the links, we faced a NotFound page at the time. This fixes the issue by specifying full paths instead.
--- docs/offline-environment.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/offline-environment.md b/docs/offline-environment.md index b76134cd6..fd345f3a9 100644 --- a/docs/offline-environment.md +++ b/docs/offline-environment.md @@ -18,8 +18,8 @@ Then you need to setup the following services on your offline environment: * [Optional] an internal PyPi server for python packages used by Kubespray * [Optional] an internal Helm registry for Helm chart files -You can get artifact lists with [generate_list.sh](contrib/offline/generate_list.sh) script. -In addition, you can find some tools for offline deployment under [contrib/offline](contrib/offline/README.md). +You can get artifact lists with [generate_list.sh](/contrib/offline/generate_list.sh) script. +In addition, you can find some tools for offline deployment under [contrib/offline](/contrib/offline/README.md). ## Configure Inventory