---
- import_tasks: facts.yml
  tags:
    - facts

- import_tasks: pre_upgrade.yml
  tags:
    - kubelet

- name: Ensure /var/lib/cni exists
  file:
    path: /var/lib/cni
    state: directory
    mode: 0755

- import_tasks: install.yml
  tags:
    - kubelet

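# When loadbalancer_apiserver_localhost is enabled, non-master nodes run a local
# nginx proxy so node components can reach the apiserver via localhost.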
- import_tasks: nginx-proxy.yml
  when: is_kube_master == false and loadbalancer_apiserver_localhost
  tags:
    - nginx

- name: Make sure dynamic kubelet configuration directory is writeable
  file:
    path: "{{ dynamic_kubelet_configuration_dir }}"
    mode: 0600
    state: directory
  when: dynamic_kubelet_configuration

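# kubelet.env carries the kubelet's environment and flags; the kubelet service
# unit sources it, hence the restart notification on change.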
- name: Write kubelet config file (kubeadm)
  template:
    src: kubelet.kubeadm.env.j2
    dest: "{{ kube_config_dir }}/kubelet.env"
    backup: yes
  notify: restart kubelet
  tags:
    - kubelet
    - kubeadm

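# Reserve the NodePort range so the kernel does not hand those ports out as
# ephemeral source ports, which would clash with NodePort services.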
- name: Ensure nodePort range is reserved
  sysctl:
    name: net.ipv4.ip_local_reserved_ports
    value: "{{ kube_apiserver_node_port_range }}"
    sysctl_set: yes
    sysctl_file: "{{ sysctl_file_path }}"
    state: present
    reload: yes
  when: kube_apiserver_node_port_range is defined
  tags:
    - kube-proxy

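# The br_netfilter module provides the net.bridge.bridge-nf-call-* sysctls that
# make bridged traffic traverse iptables, which kube-proxy relies on.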
- name: Verify if br_netfilter module exists
  shell: "modinfo br_netfilter"
  environment:
    PATH: "{{ ansible_env.PATH }}:/sbin"  # Make sure we can work around RH's conservative path management
  register: modinfo_br_netfilter
  failed_when: modinfo_br_netfilter.rc not in [0, 1]
  changed_when: false

- name: Enable br_netfilter module
  modprobe:
    name: br_netfilter
    state: present
  when: modinfo_br_netfilter.rc == 0

- name: Persist br_netfilter module
  copy:
    dest: /etc/modules-load.d/kubespray-br_netfilter.conf
    content: br_netfilter
  when: modinfo_br_netfilter.rc == 0

# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled even when br_netfilter
# is not available as a module (i.e. built into the kernel)
- name: Check if bridge-nf-call-iptables key exists
  command: "sysctl net.bridge.bridge-nf-call-iptables"
  failed_when: false
  changed_when: false
  register: sysctl_bridge_nf_call_iptables

- name: Enable bridge-nf-call tables
  sysctl:
    name: "{{ item }}"
    state: present
    sysctl_file: "{{ sysctl_file_path }}"
    value: 1
    reload: yes
  when: sysctl_bridge_nf_call_iptables.rc == 0
  with_items:
    - net.bridge.bridge-nf-call-iptables
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables

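# kube-proxy in IPVS mode needs these kernel modules loaded on every node.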
- name: Modprobe kernel modules for IPVS
  modprobe:
    name: "{{ item }}"
    state: present
  with_items:
    - ip_vs
    - ip_vs_rr
    - ip_vs_wrr
    - ip_vs_sh
    - nf_conntrack_ipv4
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Persist ip_vs modules
  copy:
    dest: /etc/modules-load.d/kube_proxy-ipvs.conf
    content: |
      ip_vs
      ip_vs_rr
      ip_vs_wrr
      ip_vs_sh
      nf_conntrack_ipv4
  when: kube_proxy_mode == 'ipvs'
  tags:
    - kube-proxy

- name: Purge proxy manifest for kubeadm or if proxy services are provided by other means, e.g. network_plugin
  file:
    path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
    state: absent
  when:
    - kube_proxy_remove
  tags:
    - kube-proxy

- name: Cleanup kube-proxy leftovers from node
  command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
  when:
    - kube_proxy_remove
  # `kube-proxy --cleanup` can exit 255 even when it only prints a WARNING and otherwise succeeds
  ignore_errors: true
  tags:
    - kube-proxy

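# For cloud providers that need credentials, sanity-check them before templating
# the cloud config below.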
- include_tasks: "{{ cloud_provider }}-credential-check.yml"
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
  tags:
    - cloud-provider
    - facts

- name: Write cacert file
  copy:
    src: "{{ openstack_cacert }}"
    dest: "{{ kube_config_dir }}/openstack-cacert.pem"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - inventory_hostname in groups['k8s-cluster']
    - cloud_provider is defined
    - cloud_provider == 'openstack'
    - openstack_cacert is defined
    - openstack_cacert != ""
  tags:
    - cloud-provider

- name: Write cloud-config
  template:
    src: "{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere', 'aws' ]
  notify: restart kubelet
  tags:
    - cloud-provider

# reload-systemd
- meta: flush_handlers

- name: Enable kubelet
  service:
    name: kubelet
    enabled: yes
    state: started
  tags:
    - kubelet