# c12s-kubespray/roles/container-engine/docker/tasks/main.yml
---
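# /run/ostree-booted exists only on ostree-based systems such as Fedora CoreOS;
# on those hosts the repository and package tasks below are skipped.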
- name: check if fedora coreos
  stat:
    path: /run/ostree-booted
  register: ostree
- name: set is_ostree
  set_fact:
    is_ostree: "{{ ostree.stat.exists }}"
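# with_first_found walks this list from most to least specific and loads the
# first matching file from ../vars, falling back to defaults.yml.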
- name: gather os specific variables
  include_vars: "{{ item }}"
  with_first_found:
    - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}-{{ host_architecture }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars
      skip: true
  tags:
    - facts
- name: Warn about Docker version on SUSE
  debug:
    msg: "SUSE distributions always install Docker from the distro repos"
  when: ansible_pkg_mgr == 'zypper'
- include_tasks: set_facts_dns.yml
  when: dns_mode != 'none' and resolvconf_mode == 'docker_dns'
  tags:
    - facts
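# Fedora 31+ boots with cgroup v2 (the unified hierarchy) by default, which
# older Docker releases do not support; force cgroup v1 on the kernel command
# line and reboot so the change takes effect.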
- name: disable unified_cgroup_hierarchy in Fedora 31+
  command: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
  when:
    - ansible_distribution == "Fedora"
    - (ansible_distribution_major_version | int) >= 31
    - ansible_proc_cmdline['systemd.unified_cgroup_hierarchy'] is not defined or ansible_proc_cmdline['systemd.unified_cgroup_hierarchy'] != '0'
- name: reboot in Fedora 31+
  reboot:
  when:
    - ansible_distribution == "Fedora"
    - (ansible_distribution_major_version | int) >= 31
    - ansible_proc_cmdline['systemd.unified_cgroup_hierarchy'] is not defined or ansible_proc_cmdline['systemd.unified_cgroup_hierarchy'] != '0'
- import_tasks: pre-upgrade.yml
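# docker_repo_key_info and docker_repo_info come from the OS-specific vars
# loaded above; pkg_key resolves to the platform's key module (e.g. apt_key
# on Debian-family hosts).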
- name: ensure docker-ce repository public key is installed
  action: "{{ docker_repo_key_info.pkg_key }}"
  args:
    id: "{{ item }}"
    url: "{{ docker_repo_key_info.url }}"
    state: present
  register: keyserver_task_result
  until: keyserver_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  with_items: "{{ docker_repo_key_info.repo_keys }}"
  environment: "{{ proxy_env }}"
  when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "RedHat", "Suse", "ClearLinux"] or is_ostree)
- name: ensure docker-ce repository is enabled
  action: "{{ docker_repo_info.pkg_repo }}"
  args:
    repo: "{{ item }}"
    state: present
  with_items: "{{ docker_repo_info.repos }}"
  when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "RedHat", "Suse", "ClearLinux"] or is_ostree) and (docker_repo_info.repos|length > 0)
- name: Configure docker repository on Fedora
  template:
    src: "fedora_docker.repo.j2"
    dest: "{{ yum_repo_dir }}/docker.repo"
  when: ansible_distribution == "Fedora" and not is_ostree
- name: Configure docker repository on RedHat/CentOS/Oracle Linux
  template:
    src: "rh_docker.repo.j2"
    dest: "{{ yum_repo_dir }}/docker-ce.repo"
  when: ansible_distribution in ["CentOS","RedHat","OracleLinux"] and not is_ostree
- name: check if container-selinux is available
  yum:
    list: "container-selinux"
  register: yum_result
  when: ansible_distribution in ["CentOS","RedHat"] and not is_ostree
- name: Configure extras repository on RedHat/CentOS if container-selinux is not available in current repos
  yum_repository:
    name: extras
    description: "CentOS-{{ ansible_distribution_major_version }} - Extras"
    state: present
    baseurl: "{{ extras_rh_repo_base_url }}"
    file: "extras"
    gpgcheck: "{{ 'yes' if extras_rh_repo_gpgkey else 'no' }}"
    gpgkey: "{{ extras_rh_repo_gpgkey }}"
    keepcache: "{{ docker_rpm_keepcache | default('1') }}"
    proxy: "{{ http_proxy | default('_none_') }}"
  when:
    - ansible_distribution in ["CentOS","RedHat"] and not is_ostree
    - yum_result.results | length == 0
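# docker_package_info is defined per distribution in ../vars; pkg_mgr resolves
# to the native package module (apt, yum, dnf, ...) and pkgs lists the packages
# (and, where set, versions) to install.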
- name: ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"
  args:
    pkg: "{{ item.name }}"
    force: "{{ item.force|default(omit) }}"
    state: "{{ item.state | default('present') }}"
    update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
    enablerepo: "{{ item.repo | default(omit) }}"
  register: docker_task_result
  until: docker_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  with_items: "{{ docker_package_info.pkgs }}"
  notify: restart docker
  when: not (ansible_os_family in ["Flatcar Container Linux by Kinvolk", "ClearLinux"] or is_ostree) and (docker_package_info.pkgs|length > 0)
- name: Ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"
  args:
    name: "{{ item.name }}"
    state: "{{ item.state | default('present') }}"
  with_items: "{{ docker_package_info.pkgs }}"
  register: docker_task_result
  until: docker_task_result is succeeded
  retries: 4
  delay: "{{ retry_stagger | d(3) }}"
  notify: restart docker
  when: ansible_os_family in ["ClearLinux"]
# This is required to ensure any apt upgrade will not break kubernetes
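# Holding the packages is roughly the CLI equivalent of:
#   echo "docker-ce hold" | dpkg --set-selections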
- name: Tell Debian hosts not to change the docker version with apt upgrade
  dpkg_selections:
    name: "{{ item }}"
    selection: hold
  when: ansible_os_family in ["Debian"]
  with_items:
    - docker-ce
    - docker-ce-cli
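# If docker fails to start under the kubespray-generated drop-ins, the rescue
# path removes them and notifies the handler so docker is restarted with its
# stock configuration.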
- name: ensure docker started, remove our config if docker start failed and try again
  block:
    - name: ensure service is started if docker packages are already present
      service:
        name: docker
        state: started
      when: docker_task_result is not changed
  rescue:
    - debug:
        msg: "Docker start failed. Try to remove our config"
    - name: remove kubespray generated config
      file:
        path: "{{ item }}"
        state: absent
      with_items:
        - /etc/systemd/system/docker.service.d/http-proxy.conf
        - /etc/systemd/system/docker.service.d/docker-options.conf
        - /etc/systemd/system/docker.service.d/docker-dns.conf
        - /etc/systemd/system/docker.service.d/docker-orphan-cleanup.conf
      notify: restart docker
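# Handlers normally run at the end of the play; flushing them here forces any
# pending "restart docker" to run now so the tasks below talk to a live daemon.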
- name: flush handlers so we can wait for docker to come up
  meta: flush_handlers
# Install each plugin using a looped include to make error handling in the included task simpler.
- include_tasks: docker_plugin.yml
loop: "{{ docker_plugins }}"
loop_control:
loop_var: docker_plugin
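# systemd.yml is expected to (re)write the docker.service drop-in configuration
# (the files cleaned up in the rescue above).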
- name: Set docker systemd config
  import_tasks: systemd.yml
- name: ensure docker service is started and enabled
  service:
    name: "{{ item }}"
    enabled: yes
    state: started
  with_items:
    - docker