# roles/kubernetes/master/tasks/kubeadm-setup.yml
---
- name: kubeadm | Check if old apiserver cert exists on host
  stat:
    path: "{{ kube_cert_dir }}/apiserver.pem"
  register: old_apiserver_cert
  delegate_to: "{{ groups['kube-master']|first }}"
  run_once: true
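# apiserver.pem is only present on clusters originally deployed without kubeadm;
# migrate such certs into the layout kubeadm expects before going further.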
- name: kubeadm | Migrate old certs if necessary
  import_tasks: kubeadm-migrate-certs.yml
  when: old_apiserver_cert.stat.exists
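# Record the apiserver key's checksum now so that "Check apiserver key again"
# below can detect whether kubeadm regenerated it during this run.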
- name: kubeadm | Check apiserver key
  stat:
    path: "{{ kube_cert_dir }}/apiserver.key"
  register: apiserver_key_before
  delegate_to: "{{ groups['kube-master']|first }}"
  run_once: true
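# kubeadm init writes /var/lib/kubelet/config.yaml, so its presence indicates
# this node has already been initialized.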
- name: kubeadm | Check if kubeadm has already run
  stat:
    path: "/var/lib/kubelet/config.yaml"
  register: kubeadm_already_run
- name: kubeadm | Delete old admin.conf
  file:
    path: "{{ kube_config_dir }}/admin.conf"
    state: absent
  when:
    - not kubeadm_already_run.stat.exists
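# Static pod manifests with the .manifest suffix come from the old non-kubeadm
# deployment; remove them so kubeadm's own manifests take over.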
- name: kubeadm | Delete old static pods
  file:
    path: "{{ kube_config_dir }}/manifests/{{ item }}.manifest"
    state: absent
  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
  when:
    - old_apiserver_cert.stat.exists
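# Deleting the manifest does not always stop an already-running container, so
# force-remove any leftover control-plane containers by their k8s_ name prefix.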
- name: kubeadm | Forcefully delete old static pods
  shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
  with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when:
    - old_apiserver_cert.stat.exists
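# The folded scalar (>-) joins the lines below into one space-separated string
# that is split back into individual SANs when the kubeadm config is templated.
# Mixing trimming ({%-) and non-trimming ({%) Jinja tags is deliberate: the
# untrimmed side keeps one separator between consecutive addresses (#3952
# restored the separators that #3908 accidentally removed for supplementary
# addresses).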
- name: kubeadm | aggregate all SANs
  set_fact:
    apiserver_sans: >-
      kubernetes
      kubernetes.default
      kubernetes.default.svc
      kubernetes.default.svc.{{ dns_domain }}
      {{ kube_apiserver_ip }}
      localhost
      127.0.0.1
      {{ ' '.join(groups['kube-master']) }}
      {%- if loadbalancer_apiserver is defined %}
      {{ apiserver_loadbalancer_domain_name }}
      {%- endif %}
      {% for host in groups['kube-master'] -%}
      {%- if hostvars[host]['access_ip'] is defined -%}
      {{ hostvars[host]['access_ip'] }}
      {%- endif %}
      {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
      {%- endfor %}
      {%- if supplementary_addresses_in_ssl_keys is defined -%}
      {% for addr in supplementary_addresses_in_ssl_keys %}
      {{ addr }}
      {%- endfor %}
      {%- endif %}
  tags: facts
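# cp -T treats the destination as the target directory itself rather than
# copying into it, so the etcd certs land directly under
# {{ kube_config_dir }}/ssl/etcd instead of a nested subdirectory.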
- name: kubeadm | Copy etcd cert dir under k8s cert dir
  command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
  changed_when: false
- name: Create audit-policy directory
  file:
    path: "{{ audit_policy_file | dirname }}"
    state: directory
  when: kubernetes_audit|default(false)
- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ audit_policy_file }}"
  when: kubernetes_audit|default(false)
# nginx-proxy is the default apiserver load balancer. When loadbalancer_apiserver
# is defined, use that load balancer instead by passing its domain name to
# kubeadm as the controlPlaneEndpoint.
- name: Set kubeadm_config_api_fqdn
  set_fact:
    kubeadm_config_api_fqdn: "{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}"
  when: loadbalancer_apiserver is defined
- name: kubeadm | set kubeadm version
  import_tasks: kubeadm-version.yml
- name: kubeadm | Certificate management with kubeadm
  import_tasks: kubeadm-certificate.yml
  when:
    - not upgrade_cluster_setup
    - kubeadm_already_run.stat.exists
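# Re-running kubeadm init against a live cluster can fail with
# "field is immutable" while re-uploading the kubeadm config; failed_when
# below treats that case as success.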
- name: kubeadm | Initialize first master
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
  register: kubeadm_init
  # Retry is because upload config sometimes fails
  retries: 3
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
  when: inventory_hostname == groups['kube-master']|first and not kubeadm_already_run.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
  notify: Master | restart kubelet
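# Fetch the CA, service-account and front-proxy key material from the first
# master so the remaining masters are initialized with identical cluster
# certificates.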
- name: slurp kubeadm certs
  slurp:
    src: "{{ item }}"
  with_items:
    - "{{ kube_cert_dir }}/apiserver.crt"
    - "{{ kube_cert_dir }}/apiserver.key"
    - "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
    - "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
    - "{{ kube_cert_dir }}/ca.crt"
    - "{{ kube_cert_dir }}/ca.key"
    - "{{ kube_cert_dir }}/front-proxy-ca.crt"
    - "{{ kube_cert_dir }}/front-proxy-ca.key"
    - "{{ kube_cert_dir }}/front-proxy-client.crt"
    - "{{ kube_cert_dir }}/front-proxy-client.key"
    - "{{ kube_cert_dir }}/sa.key"
    - "{{ kube_cert_dir }}/sa.pub"
  register: kubeadm_certs
  delegate_to: "{{ groups['kube-master']|first }}"
  run_once: true
- name: kubeadm | write out kubeadm certs
  copy:
    dest: "{{ item.item }}"
    content: "{{ item.content | b64decode }}"
    owner: root
    group: root
    mode: 0600
  no_log: true
  register: copy_kubeadm_certs
  with_items: "{{ kubeadm_certs.results }}"
  when: inventory_hostname != groups['kube-master']|first
- name: kubeadm | Kubeconfig management with kubeadm
  import_tasks: kubeadm-kubeconfig.yml
  when:
    - not upgrade_cluster_setup
    - kubeadm_already_run.stat.exists
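# The remaining masters run kubeadm init with the certs distributed above, so
# they come up as part of the same control plane rather than as new clusters.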
- name: kubeadm | Init other uninitialized masters
  command: >-
    timeout -k 600s 600s
    {{ bin_dir }}/kubeadm init
    --config={{ kube_config_dir }}/kubeadm-config.yaml
    --ignore-preflight-errors=all
  register: kubeadm_init
  retries: 10
  until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
  when: inventory_hostname != groups['kube-master']|first and not kubeadm_already_run.stat.exists
  failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
  notify: Master | restart kubelet
- name: kubeadm | upgrade kubernetes cluster
  import_tasks: kubeadm-upgrade.yml
  when: upgrade_cluster_setup
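# Re-stat the apiserver key and compare it against the checksum taken at the
# start of this file; a difference means kubeadm regenerated the key material.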
- name: kubeadm | Check apiserver key again
  stat:
    path: "{{ kube_cert_dir }}/apiserver.key"
  register: apiserver_key_after
  delegate_to: "{{ groups['kube-master']|first }}"
  run_once: true
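# /bin/true is a no-op; its only purpose is to notify the handler when the
# checksum changed.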
- name: kubeadm | Set secret_changed if service account key was updated
  command: /bin/true
  notify: Master | set secret_changed
  when: apiserver_key_before.stat.checksum|default("") != apiserver_key_after.stat.checksum
- name: kubeadm | cleanup old certs if necessary
  import_tasks: kubeadm-cleanup-old-certs.yml
  when:
    - old_apiserver_cert.stat.exists
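# Masters that also appear in kube-node are meant to run workloads, so drop the
# NoSchedule taint; failed_when: false tolerates nodes that were never tainted.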
- name: kubeadm | Remove taint for master with node role
  command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
  delegate_to: "{{ groups['kube-master']|first }}"
  when: inventory_hostname in groups['kube-node']
  failed_when: false