# c12s-kubespray/roles/kubernetes/node/tasks/main.yml
---
- import_tasks: facts.yml
  tags:
    - facts

- import_tasks: pre_upgrade.yml
  tags:
    - kubelet

- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: 0755

- import_tasks: install.yml
  tags:
    - kubelet
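
# On non-master nodes, set up a local reverse proxy in front of the apiserver(s)
# so node components can reach a load-balanced endpoint on localhost instead of
# depending on a single master.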
- import_tasks: nginx-proxy.yml
  when:
    - not is_kube_master
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'nginx'
  tags:
    - nginx

- import_tasks: haproxy.yml
  when:
    - not is_kube_master
    - loadbalancer_apiserver_localhost
    - loadbalancer_apiserver_type == 'haproxy'
  tags:
    - haproxy

- name: Make sure dynamic kubelet configuration directory is writeable
  file:
    path: "{{ dynamic_kubelet_configuration_dir }}"
    mode: 0600
    state: directory
  when: dynamic_kubelet_configuration
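
# kubelet.env carries the environment/flags for the kubelet service, rendered
# from the kubeadm template; any change notifies the "restart kubelet" handler.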
- name: Write kubelet config file (kubeadm)
  template:
    src: kubelet.kubeadm.env.j2
    dest: "{{ kube_config_dir }}/kubelet.env"
    backup: yes
  notify: restart kubelet
  tags:
    - kubelet
    - kubeadm
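
# Reserving the nodePort range in ip_local_reserved_ports keeps the kernel from
# handing those ports out as ephemeral source ports, which would clash with
# NodePort services.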
- name: Ensure nodePort range is reserved
  sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: yes
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: yes
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy
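
# br_netfilter makes bridged traffic visible to iptables, which kube-proxy relies on.
# On some kernels it is built in rather than shipped as a module, so only load and
# persist it when modinfo reports that the module exists.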
- name: Verify if br_netfilter module exists
  shell: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH's conservative path management
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false

- name: Verify br_netfilter module path exists
  file:
    path: /etc/modules-load.d
    state: directory

- name: Enable br_netfilter module
  modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
  when: modinfo_br_netfilter.rc == 0

# If br_netfilter is not available as a module (i.e. built into the kernel),
# kube-proxy still needs net.bridge.bridge-nf-call-iptables enabled, so set the
# bridge-nf-call sysctls whenever the key exists.
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  register: sysctl_bridge_nf_call_iptables

- name: Enable bridge-nf-call tables
  sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: 1
    reload: yes
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables
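
# kube-proxy in IPVS mode needs the IPVS kernel modules loaded; ip_vs_rr, ip_vs_wrr
# and ip_vs_sh provide the round-robin, weighted round-robin and source-hash schedulers.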
- name: Modprobe Kernel Module for IPVS
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy
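
# The IPv4 conntrack module was renamed in kernel 4.19: try the old nf_conntrack_ipv4
# name first and fall back to nf_conntrack if it is no longer present.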
- name: Modprobe nf_conntrack_ipv4 for kernels < 4.19
  modprobe:
    name: nf_conntrack_ipv4
    state: present
  register: enable_nf_conntrack
  ignore_errors: yes
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Modprobe nf_conntrack for kernels >= 4.19
  modprobe:
    name: nf_conntrack
    state: present
  when:
    - enable_nf_conntrack is failed
    - kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Persist ip_vs modules
  copy:
    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
    content: |
      ip_vs
      ip_vs_rr
      ip_vs_wrr
      ip_vs_sh
      {% if enable_nf_conntrack is failed -%}
      nf_conntrack
      {%- else -%}
      nf_conntrack_ipv4
      {%- endif -%}
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy
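
# When kube-proxy is managed by kubeadm or replaced by the network plugin, drop the
# static kube-proxy manifest and clean up any iptables/ipvs rules it left behind.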
- name: Purge proxy manifest for kubeadm or if proxy services are provided by other means, e.g. network_plugin
  file:
    path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
    state: absent
  when:
    - kube_proxy_remove
  tags:
    - kube-proxy

- name: Cleanup kube-proxy leftovers from node
  command: "{{ bin_dir }}/hyperkube kube-proxy --cleanup"
  when:
    - kube_proxy_remove
  # `kube-proxy --cleanup` succeeds (per the WARNING it prints) but still exits 255, so ignore the return code
  ignore_errors: true
  tags:
    - kube-proxy
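
# For cloud providers that need credentials, check that the required credential
# variables are set before writing the provider configuration.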
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
  tags:
    - cloud-provider
    - facts

- name: Write cacert file
  copy:
    src: "{{ openstack_cacert }}"
    dest: "{{ kube_config_dir }}/openstack-cacert.pem"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - inventory_hostname in groups['k8s-cluster']
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert
  tags:
    - cloud-provider

- name: Write cloud-config
  template:
    src: "{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws' ]
  notify: restart kubelet
  tags:
    - cloud-provider

# reload-systemd
- meta: flush_handlers

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started
  tags:
    - kubelet