# c12s-kubespray/roles/kubernetes/node/tasks/main.yml
---
- import_tasks: facts.yml
  tags:
    - facts

- import_tasks: pre_upgrade.yml
  tags:
    - kubelet

- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: 0755

- import_tasks: install.yml
  tags:
    - kubelet
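
# Optionally run a local apiserver load balancer (nginx or haproxy) on every
# non-master node (loadbalancer_apiserver_localhost), so node components can
# reach any master through localhost.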
- import_tasks: loadbalancer/nginx-proxy.yml
  when:
    - not is_kube_master
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
  tags:
    - nginx

- import_tasks: loadbalancer/haproxy.yml
  when:
    - not is_kube_master
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
  tags:
    - haproxy
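
# Reserving the nodePort range in ip_local_reserved_ports stops the kernel from
# handing out those ports as ephemeral source ports, which would collide with
# NodePort services.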
- name: Ensure nodePort range is reserved
  sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: yes
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: yes
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy
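
# Load br_netfilter where the module exists and persist it across reboots;
# if modinfo cannot find it, only the bridge-nf-call sysctls below are applied.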
- name: Verify if br_netfilter module exists
  shell: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH's conservative path management
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false

- name: Verify br_netfilter module path exists
  file:
    path: /etc/modules-load.d
    state: directory

- name: Enable br_netfilter module
  modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
  when: modinfo_br_netfilter.rc == 0

# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled; set it (and the
# arptables/ip6tables variants) whenever the sysctl key exists, which also covers
# kernels where bridge netfilter is built in rather than loaded via br_netfilter
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  register: sysctl_bridge_nf_call_iptables

- name: Enable bridge-nf-call tables
  sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: "1"
    reload: yes
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables
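
# IPVS mode requires the ip_vs kernel module plus the schedulers kube-proxy can
# use: round-robin (ip_vs_rr), weighted round-robin (ip_vs_wrr) and source
# hashing (ip_vs_sh).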
- name: Modprobe Kernel Module for IPVS
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy
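
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19, so try the old
# name first and fall back to the new one when that fails.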
- name: Modprobe nf_conntrack_ipv4 for kernels < 4.19
  modprobe:
    name: nf_conntrack_ipv4
    state: present
  register: enable_nf_conntrack
  ignore_errors: yes
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Modprobe nf_conntrack for kernels >= 4.19
  modprobe:
    name: nf_conntrack
    state: present
  when:
    - enable_nf_conntrack is failed
    - kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Persist ip_vs modules
  copy:
    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
    content: |
      ip_vs
      ip_vs_rr
      ip_vs_wrr
      ip_vs_sh
      {% if enable_nf_conntrack is failed -%}
      nf_conntrack
      {%- else -%}
      nf_conntrack_ipv4
      {%- endif -%}
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy
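
# When kube_proxy_remove is set (e.g. the network plugin replaces kube-proxy),
# drop the static manifest and clean up the rules kube-proxy left on the node.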
- name: Purge proxy manifest for kubeadm, or if proxy services are provided by other means, e.g. the network_plugin
  file:
    path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
    state: absent
  when:
    - kube_proxy_remove
  tags:
    - kube-proxy
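
# The cleanup runs `kube-proxy --cleanup` from the release image in a throwaway
# container, so the node does not need a local kube-proxy binary.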
- name: Set command for kube-proxy cleanup
  set_fact:
    kube_proxy_cleanup_command: >-
      {%- if container_manager in ['docker', 'crio'] %}
      {{ docker_bin_dir }}/docker run --rm --privileged -v /lib/modules:/lib/modules --entrypoint kube-proxy {{ kube_proxy_image_repo }}:{{ kube_version }} --cleanup
      {%- elif container_manager == "containerd" %}
      ctr run --rm --privileged --mount type=bind,src=/lib/modules,dst=/lib/modules,options=rbind:rw {{ kube_proxy_image_repo }}:{{ kube_version }} kube-proxy-cleanup kube-proxy --cleanup
      {%- endif %}
  when:
    - kube_proxy_remove
  tags:
    - kube-proxy

- name: Ensure kube-proxy container is pulled for containerd
  command: "{{ bin_dir }}/crictl pull {{ kube_proxy_image_repo }}:{{ kube_version }}"
  when:
    - kube_proxy_remove
    - container_manager == "containerd"
  tags:
    - kube-proxy

- name: Cleanup kube-proxy leftovers from node
  command: "{{ kube_proxy_cleanup_command }}"
  # `kube-proxy --cleanup` still exits 255 even when its own WARNING output says
  # the run was OK, so the non-zero rc has to be ignored here
  ignore_errors: true
  when:
    - kube_proxy_remove
  tags:
    - kube-proxy
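
# Run the per-provider credential checks before writing any cloud configuration.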
- include_tasks: "cloud-credentials/{{ cloud_provider }}-credential-check.yml"
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
  tags:
    - cloud-provider
    - facts
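
# openstack_cacert can be either a path to a CA bundle on the Ansible control
# host or the base64-encoded certificate itself; detect which form it is and
# write it out accordingly.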
- name: Test if openstack_cacert is a base64 string
  set_fact:
    openstack_cacert_is_base64: "{% if openstack_cacert is search('^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)?$') %}true{% else %}false{% endif %}"
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0

- name: Write cacert file
  copy:
    src: "{{ openstack_cacert if not openstack_cacert_is_base64 else omit }}"
    content: "{{ openstack_cacert | b64decode if openstack_cacert_is_base64 else omit }}"
    dest: "{{ kube_config_dir }}/openstack-cacert.pem"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert | length > 0
  tags:
    - cloud-provider
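
# Render the in-tree cloud provider config; kubelet must be restarted to pick
# up changes, hence the notify.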
- name: Write cloud-config
  template:
    src: "cloud-configs/{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws' ]
  notify: restart kubelet
  tags:
    - cloud-provider

- import_tasks: kubelet.yml
  tags:
    - kubelet
    - kubeadm