Use K8s 1.14 and add kubeadm experimental control plane mode (#4317)

* Use Kubernetes 1.14 and experimental control plane support
* Bump to v1.14.0

Parent: 46ba6a4154
Commit: 316508626d
37 changed files with 297 additions and 401 deletions
@@ -108,7 +108,7 @@ Supported Components
--------------------

- Core
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.13.5
- [kubernetes](https://github.com/kubernetes/kubernetes) v1.14.0
- [etcd](https://github.com/coreos/etcd) v3.2.26
- [docker](https://www.docker.com/) v18.06 (see note)
- [rkt](https://github.com/rkt/rkt) v1.21.0 (see Note 2)
@@ -20,7 +20,7 @@ kube_users_dir: "{{ kube_config_dir }}/users"
kube_api_anonymous_auth: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.13.5
kube_version: v1.14.0

# kubernetes image repo define
kube_image_repo: "gcr.io/google-containers"
@@ -35,7 +35,7 @@ download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube
image_arch: "{{host_architecture | default('amd64')}}"

# Versions
kube_version: v1.13.5
kube_version: v1.14.0
kubeadm_version: "{{ kube_version }}"
etcd_version: v3.2.26
@@ -40,18 +40,35 @@
run_once: yes
when: kubeconfig_localhost|default(false)

# NOTE(mattymo): Please forgive this workaround
- name: Generate admin kubeconfig with external api endpoint
shell: >-
{{ bin_dir }}/kubeadm alpha
{% if kubeadm_version is version('v1.13.0', '<') %}
phase
{% if kubeadm_version is version('v1.14.0', '>=') %}
mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
{% endif %}
{{ bin_dir }}/kubeadm
{% if kubeadm_version is version('v1.14.0', '>=') %}
init phase
{% elif kubeadm_version is version('v1.13.0', '>=') %}
alpha
{% else %}
alpha phase
{% endif %}
{% if kubeadm_version is version('v1.14.0', '>=') %}
kubeconfig admin
--kubeconfig-dir {{ kube_config_dir }}/external_kubeconfig
{% else %}
kubeconfig user
--client-name kubernetes-admin
--org system:masters
{% endif %}
--cert-dir {{ kube_config_dir }}/ssl
--apiserver-advertise-address {{ external_apiserver_address }}
--apiserver-bind-port {{ external_apiserver_port }}
{% if kubeadm_version is version('v1.14.0', '>=') %}
&& cat {{ kube_config_dir }}/external_kubeconfig/admin.conf &&
rm -rf {{ kube_config_dir }}/external_kubeconfig
{% endif %}
environment: "{{ proxy_env }}"
run_once: yes
register: admin_kubeconfig
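Note: on kubeadm >= v1.14 the block above switches from "kubeadm alpha phase kubeconfig user" to "kubeadm init phase kubeconfig admin". A minimal sketch of what the task might render to in that case; the paths /usr/local/bin and /etc/kubernetes and the endpoint 192.0.2.10:6443 are illustrative assumptions, not values from this commit:
- name: Generate admin kubeconfig with external api endpoint (rendered sketch, kubeadm v1.14)
  shell: >-
    mkdir -p /etc/kubernetes/external_kubeconfig &&
    /usr/local/bin/kubeadm init phase kubeconfig admin
    --kubeconfig-dir /etc/kubernetes/external_kubeconfig
    --cert-dir /etc/kubernetes/ssl
    --apiserver-advertise-address 192.0.2.10
    --apiserver-bind-port 6443
    && cat /etc/kubernetes/external_kubeconfig/admin.conf &&
    rm -rf /etc/kubernetes/external_kubeconfig
  run_once: yes
  register: admin_kubeconfig   # stdout then holds the rendered admin.conf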
@@ -26,6 +26,12 @@
run_once: true
register: temp_token
delegate_to: "{{ groups['kube-master'][0] }}"
when: kubeadm_token is not defined

- name: Set kubeadm_token to generated token
set_fact:
kubeadm_token: "{{ temp_token.stdout }}"
when: kubeadm_token is not defined

- name: gets the kubeadm version
command: "{{ bin_dir }}/kubeadm version -o short"
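Note: for orientation, a sketch of the values these facts typically end up holding (example values, not output captured from this commit):
# kubeadm token create prints a bootstrap token of the form <6 chars>.<16 chars>
kubeadm_token: "abcdef.0123456789abcdef"
# kubeadm version -o short prints a bare version string, later compared with the `version` test
kubeadm_version: "v1.14.0"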
@@ -61,8 +67,6 @@
dest: "{{ kube_config_dir }}/kubeadm-client.conf"
backup: yes
when: not is_kube_master
vars:
kubeadm_token: "{{ temp_token.stdout }}"

- name: Join to cluster if needed
environment:
@@ -122,11 +126,10 @@
{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
| sed 's#server:.*#server:\ {{ kube_apiserver_endpoint }}#g'
| {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
delegate_to: "{{groups['kube-master']|first}}"
run_once: true
when:
- inventory_hostname == groups['kube-master']|first
- kubeadm_config_api_fqdn is not defined
- is_kube_master
- kubeadm_discovery_address != kube_apiserver_endpoint
- not kube_proxy_remove
tags:
@@ -134,11 +137,10 @@

- name: Restart all kube-proxy pods to ensure that they load the new configmap
shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
delegate_to: "{{groups['kube-master']|first}}"
run_once: true
when:
- inventory_hostname == groups['kube-master']|first
- kubeadm_config_api_fqdn is not defined
- is_kube_master
- kubeadm_discovery_address != kube_apiserver_endpoint
- not kube_proxy_remove
tags:
@@ -159,11 +161,10 @@
# is fixed
- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
delegate_to: "{{groups['kube-master']|first}}"
run_once: true
when:
- inventory_hostname == groups['kube-master']|first
- kube_proxy_remove
- is_kube_master
- kubeadm_discovery_address != kube_apiserver_endpoint
tags:
- kube-proxy
@@ -107,4 +107,4 @@ kube_proxy_resource_container: /kube-proxy

# udpIdleTimeout is how long an idle UDP connection will be kept open (e.g. '250ms', '2s').
# Must be greater than 0. Only applicable for proxyMode=userspace.
kube_proxy_udp_idle_timeout: 250ms
kube_proxy_udp_idle_timeout: 250ms
@@ -23,11 +23,18 @@ kube_apiserver_storage_backend: etcd3
# By default, force back to etcd2. Set to true to force etcd3 (experimental!)
force_etcd3: false

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Associated interfaces must be reachable by the rest of the cluster, and by
# CLI/web clients.
kube_controller_manager_bind_address: 0.0.0.0
kube_scheduler_bind_address: 0.0.0.0

# discovery_timeout modifies the discovery timeout
discovery_timeout: 5m0s

# audit support
kubernetes_audit: false
# path to audit log file
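Note: the new kube_etcd_cacert_file / kube_etcd_cert_file / kube_etcd_key_file variables default to the per-node etcd client certificate names and are reused by the templates and network-plugin roles below. A minimal sketch of overriding them from inventory group_vars; the file path and the alternative naming scheme are assumptions for illustration only:
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml (hypothetical override)
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: member-{{ inventory_hostname }}.pem        # assumed custom cert naming
kube_etcd_key_file: member-{{ inventory_hostname }}-key.pem     # assumed custom key naming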
@@ -78,7 +85,6 @@ kube_apiserver_request_timeout: "1m0s"

# 1.9 and below Admission control plug-ins
kube_apiserver_admission_control:
- Initializers
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
@@ -99,8 +105,7 @@ kube_apiserver_enable_admission_plugins: []
kube_apiserver_disable_admission_plugins: []

# extra runtime config
kube_api_runtime_config:
- admissionregistration.k8s.io/v1alpha1
kube_api_runtime_config: []

## Enable/Disable Kube API Server Authentication Methods
kube_basic_auth: false
@@ -12,33 +12,3 @@
- {src: front-proxy-client.crt, dest: front-proxy-client.crt.old}
- {src: front-proxy-client.key, dest: front-proxy-client.key.old}
ignore_errors: yes

- name: Remove old certs and keys
file:
path: "{{ kube_cert_dir }}/{{ item }}"
state: absent
with_items:
- apiserver.crt
- apiserver.key
- apiserver-kubelet-client.crt
- apiserver-kubelet-client.key
- front-proxy-client.crt
- front-proxy-client.key

- name: Generate new certs and keys
command: "{{ bin_dir }}/kubeadm init phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
with_items:
- apiserver
- apiserver-kubelet-client
- front-proxy-client
when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '>=')

- name: Generate new certs and keys
command: "{{ bin_dir }}/kubeadm alpha phase certs {{ item }} --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
with_items:
- apiserver
- apiserver-kubelet-client
- front-proxy-client
when: inventory_hostname == groups['kube-master']|first and kubeadm_version is version('v1.13.0', '<')
@@ -1,34 +0,0 @@
---
- name: Backup old configuration files
copy:
src: "{{ kube_config_dir }}/{{ item.src }}"
dest: "{{ kube_config_dir }}/{{ item.dest }}"
remote_src: yes
with_items:
- {src: admin.conf, dest: admin.conf.old}
- {src: kubelet.conf, dest: kubelet.conf.old}
- {src: controller-manager.conf, dest: controller-manager.conf.old}
- {src: scheduler.conf, dest: scheduler.conf.old}
ignore_errors: yes

- name: Remove old configuration files
file:
path: "{{ kube_config_dir }}/{{ item }}"
state: absent
with_items:
- admin.conf
- kubelet.conf
- controller-manager.conf
- scheduler.conf

- name: Generate new configuration files
command: "{{ bin_dir }}/kubeadm init phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
when: kubeadm_version is version('v1.13.0', '>=')
ignore_errors: yes

- name: Generate new configuration files
command: "{{ bin_dir }}/kubeadm alpha phase kubeconfig all --config={{ kube_config_dir }}/kubeadm-config.yaml"
environment: "{{ proxy_env }}"
when: kubeadm_version is version('v1.13.0', '<')
ignore_errors: yes
@@ -15,6 +15,6 @@
- {src: front-proxy-client-key.pem, dest: front-proxy-client.key}
- {src: service-account-key.pem, dest: sa.pub}
- {src: service-account-key.pem, dest: sa.key}
- {src: "node-{{ inventory_hostname }}.pem", dest: apiserver-kubelet-client.crt }
- {src: "node-{{ inventory_hostname }}-key.pem", dest: apiserver-kubelet-client.key }
- {src: "node-{{ inventory_hostname }}.pem", dest: apiserver-kubelet-client.crt}
- {src: "node-{{ inventory_hostname }}-key.pem", dest: apiserver-kubelet-client.key}
register: kubeadm_copy_old_certs
@@ -0,0 +1,45 @@
---
- name: Set kubeadm_discovery_address
set_fact:
kubeadm_discovery_address: >-
{%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- else -%}
{{ kube_apiserver_endpoint }}
{%- endif %}
tags:
- facts

- name: Create kubeadm ControlPlane config
template:
src: "kubeadm-controlplane.{{ kubeadmConfig_api_version }}.yaml.j2"
dest: "{{ kube_config_dir }}/kubeadm-controlplane.yaml"
backup: yes
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists

- name: Wait for k8s apiserver
wait_for:
host: "{{kubeadm_discovery_address.split(':')[0]}}"
port: "{{kubeadm_discovery_address.split(':')[1]}}"
timeout: 180

- name: Joining control plane node to the cluster.
command: >-
{{ bin_dir }}/kubeadm join
--config {{ kube_config_dir}}/kubeadm-controlplane.yaml
--ignore-preflight-errors=all
{% if kubeadm_certificate_key is defined %}
--certificate-key={{ kubeadm_certificate_key }}
{% endif %}
register: kubeadm_join_control_plane
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"

- name: Set secret_changed to false to avoid extra token rotation
set_fact:
secret_changed: false
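Note: this new task file (included later as kubeadm-secondary-experimental.yml) joins the remaining masters through kubeadm's experimental control plane flow instead of re-running kubeadm init. A rough sketch of the command it might issue on a secondary master; the binary path, config path and certificate key value are illustrative assumptions, not taken from the commit:
- name: Joining control plane node to the cluster (rendered sketch)
  command: >-
    /usr/local/bin/kubeadm join
    --config /etc/kubernetes/kubeadm-controlplane.yaml
    --ignore-preflight-errors=all
    --certificate-key=0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
  environment:
    PATH: "/usr/local/bin:{{ ansible_env.PATH }}"
  register: kubeadm_join_control_plane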
roles/kubernetes/master/tasks/kubeadm-secondary-legacy.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
- name: slurp kubeadm certs
slurp:
src: "{{ item }}"
with_items:
- "{{ kube_cert_dir }}/apiserver.crt"
- "{{ kube_cert_dir }}/apiserver.key"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
- "{{ kube_cert_dir }}/ca.crt"
- "{{ kube_cert_dir }}/ca.key"
- "{{ kube_cert_dir }}/front-proxy-ca.crt"
- "{{ kube_cert_dir }}/front-proxy-ca.key"
- "{{ kube_cert_dir }}/front-proxy-client.crt"
- "{{ kube_cert_dir }}/front-proxy-client.key"
- "{{ kube_cert_dir }}/sa.key"
- "{{ kube_cert_dir }}/sa.pub"
register: kubeadm_certs
delegate_to: "{{ groups['kube-master']|first }}"

- name: kubeadm | write out kubeadm certs
copy:
dest: "{{ item.item }}"
content: "{{ item.content | b64decode }}"
owner: root
group: root
mode: 0600
no_log: true
register: copy_kubeadm_certs
with_items: "{{ kubeadm_certs.results }}"
when: inventory_hostname != groups['kube-master']|first

- name: kubeadm | Init other uninitialized masters
command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
register: kubeadm_init
retries: 10
until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
when:
- inventory_hostname != groups['kube-master']|first
- not kubeadm_already_run.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
@@ -10,11 +10,10 @@
import_tasks: kubeadm-migrate-certs.yml
when: old_apiserver_cert.stat.exists

- name: kubeadm | Check apiserver key
- name: kubeadm | Check serviceaccount key
stat:
path: "{{ kube_cert_dir }}/apiserver.key"
register: apiserver_key_before
delegate_to: "{{groups['kube-master']|first}}"
register: sa_key_before
run_once: true

- name: kubeadm | Check if kubeadm has already run
@@ -62,10 +61,6 @@
sans_address: "{{ groups['kube-master'] | map('extract', hostvars, ['ansible_default_ipv4', 'address']) | list | select('defined') | list }}"
tags: facts

- name: kubeadm | Copy etcd cert dir under k8s cert dir
command: "cp -TR {{ etcd_cert_dir }} {{ kube_config_dir }}/ssl/etcd"
changed_when: false

- name: Create audit-policy directory
file:
path: "{{ audit_policy_file | dirname }}"
@@ -94,7 +89,18 @@
- kubeadm_already_run.stat.exists

- name: kubeadm | Initialize first master
command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
command: >-
timeout -k 600s 600s
{{ bin_dir }}/kubeadm init
--config={{ kube_config_dir }}/kubeadm-config.yaml
--ignore-preflight-errors=all
{% if kubeadm_version is version('v1.14.0', '>=') %}
--experimental-upload-certs
{% endif %}
--skip-phases=addon/coredns
{% if kubeadm_certificate_key is defined %}
--certificate-key={{ kubeadm_certificate_key }}
{% endif %}
register: kubeadm_init
# Retry is because upload config sometimes fails
retries: 3
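Note: on kubeadm >= v1.14 the first-master init now uploads the control plane certificates as a cluster secret so that secondary masters can join without copying files. A sketch of the rendered command under those conditions; the binary and config paths are assumed defaults, not values asserted by the commit:
- name: kubeadm | Initialize first master (rendered sketch, kubeadm v1.14)
  command: >-
    timeout -k 600s 600s
    /usr/local/bin/kubeadm init
    --config=/etc/kubernetes/kubeadm-config.yaml
    --ignore-preflight-errors=all
    --experimental-upload-certs
    --skip-phases=addon/coredns
  register: kubeadm_init   # stdout contains the "--certificate-key <key>" line scraped below
  retries: 3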
@@ -105,76 +111,75 @@
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet

- name: slurp kubeadm certs
slurp:
src: "{{ item }}"
with_items:
- "{{ kube_cert_dir }}/apiserver.crt"
- "{{ kube_cert_dir }}/apiserver.key"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.crt"
- "{{ kube_cert_dir }}/apiserver-kubelet-client.key"
- "{{ kube_cert_dir }}/ca.crt"
- "{{ kube_cert_dir }}/ca.key"
- "{{ kube_cert_dir }}/front-proxy-ca.crt"
- "{{ kube_cert_dir }}/front-proxy-ca.key"
- "{{ kube_cert_dir }}/front-proxy-client.crt"
- "{{ kube_cert_dir }}/front-proxy-client.key"
- "{{ kube_cert_dir }}/sa.key"
- "{{ kube_cert_dir }}/sa.pub"
register: kubeadm_certs
delegate_to: "{{ groups['kube-master']|first }}"
run_once: true

- name: kubeadm | write out kubeadm certs
copy:
dest: "{{ item.item }}"
content: "{{ item.content | b64decode }}"
owner: root
group: root
mode: 0600
no_log: true
register: copy_kubeadm_certs
with_items: "{{ kubeadm_certs.results }}"
when: inventory_hostname != groups['kube-master']|first

- name: kubeadm | Kubeconfig management with kubeadm
import_tasks: kubeadm-kubeconfig.yml
- name: set kubeadm certificate key
set_fact:
kubeadm_certificate_key: "{{ item | regex_search('--certificate-key ([^ ]+)','\\1') | first }}"
with_items: "{{ (kubeadm_init|default({'stdout_lines': []}))['stdout_lines'] }}"
run_once: yes
when:
- not upgrade_cluster_setup
- kubeadm_already_run.stat.exists
- kubeadm_version is version('v1.14.0', '>=')
- kubeadm_certificate_key is not defined
- kubeadm_init is defined
- item | trim | match('.*--certificate-key .*')

- name: kubeadm | Init other uninitialized masters
command: timeout -k 600s 600s {{ bin_dir }}/kubeadm init --config={{ kube_config_dir }}/kubeadm-config.yaml --ignore-preflight-errors=all
register: kubeadm_init
retries: 10
until: kubeadm_init is succeeded or "field is immutable" in kubeadm_init.stderr
when: inventory_hostname != groups['kube-master']|first and not kubeadm_already_run.stat.exists
failed_when: kubeadm_init.rc != 0 and "field is immutable" not in kubeadm_init.stderr
environment:
PATH: "{{ bin_dir }}:{{ ansible_env.PATH }}"
notify: Master | restart kubelet
- name: Create kubeadm token for joining nodes with 24h expiration (default)
command: "{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create"
run_once: true
register: temp_token
retries: 5
delay: 5
until: temp_token is succeeded
when: kubeadm_token is not defined
tags:
- kubeadm_token

- name: kubeadm | upgrage kubernetes cluster
- name: Set kubeadm_token
set_fact:
kubeadm_token: "{{ temp_token.stdout }}"
when: kubeadm_token is not defined
tags:
- kubeadm_token

- name: Create hardcoded kubeadm token for joining nodes with 24h expiration (if defined)
shell: >-
{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm_token }} || :;
{{ bin_dir }}/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm_token }}
run_once: true
when:
- inventory_hostname == groups['kube-master']|first
- kubeadm_token is defined
tags:
- kubeadm_token

- name: kubeadm | Initialize other masters (experimental control plane)
include: kubeadm-secondary-experimental.yml
when: kubeadm_control_plane

- name: kubeadm | Initialize other masters (experimental control plane)
include: kubeadm-secondary-legacy.yml
when: not kubeadm_control_plane

- name: kubeadm | upgrade kubernetes cluster
import_tasks: kubeadm-upgrade.yml
when: upgrade_cluster_setup

- name: kubeadm | Check apiserver key again
- name: kubeadm | Check serviceaccount key again
stat:
path: "{{ kube_cert_dir }}/apiserver.key"
register: apiserver_key_after
delegate_to: "{{groups['kube-master']|first}}"
path: "{{ kube_cert_dir }}/sa.key"
register: sa_key_after
run_once: true

- name: kubeadm | Set secret_changed if service account key was updated
command: /bin/true
notify: Master | set secret_changed
when: apiserver_key_before.stat.checksum|default("") != apiserver_key_after.stat.checksum
when: sa_key_before.stat.checksum|default("") != sa_key_after.stat.checksum

- name: kubeadm | cleanup old certs if necessary
import_tasks: kubeadm-cleanup-old-certs.yml
when:
- old_apiserver_cert.stat.exists

# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node.kubernetes.io/master:NoSchedule-"
delegate_to: "{{groups['kube-master']|first}}"
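Note: the "set kubeadm certificate key" task above scrapes the key that --experimental-upload-certs prints during init. A minimal isolated sketch of that filter; the sample stdout line is invented for illustration:
- name: set kubeadm certificate key (isolated sketch)
  vars:
    example_line: "  --certificate-key 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
  set_fact:
    # regex_search with a capture-group argument returns a list of captures, hence "| first"
    kubeadm_certificate_key: "{{ example_line | regex_search('--certificate-key ([^ ]+)', '\\1') | first }}"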
@@ -3,11 +3,6 @@
command: "{{ bin_dir }}/kubeadm version -o short"
register: kubeadm_output

- name: sets kubeadm api version to v1alpha1
set_fact:
kubeadmConfig_api_version: v1alpha1
when: kubeadm_output.stdout is version('v1.11.0', '<')

- name: sets kubeadm api version to v1alpha2
set_fact:
kubeadmConfig_api_version: v1alpha2
@@ -3,8 +3,8 @@
command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions"
environment:
ETCDCTL_API: 2
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
register: old_data_exists
delegate_to: "{{groups['etcd'][0]}}"
changed_when: false
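Note: with the stock kube_etcd_* defaults the rendered environment is equivalent to the old hard-coded paths. A sketch of the rendered values, assuming inventory_hostname is "node1" and etcd_cert_dir keeps its usual /etc/ssl/etcd/ssl location (both assumptions, not stated in this hunk):
environment:
  ETCDCTL_API: 2
  ETCDCTL_CERT_FILE: /etc/ssl/etcd/ssl/node-node1.pem
  ETCDCTL_KEY_FILE: /etc/ssl/etcd/ssl/node-node1-key.pem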
@@ -1,204 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
api:
{% if kubeadm_config_api_fqdn is defined %}
controlPlaneEndpoint: {{ kubeadm_config_api_fqdn }}
bindPort: {{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
advertiseAddress: {{ ip | default(fallback_ips[inventory_hostname]) }}
bindPort: {{ kube_apiserver_port }}
{% endif %}
etcd:
endpoints:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/ca.pem
certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
podSubnet: {{ kube_pods_subnet }}
kubernetesVersion: {{ kube_version }}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
cloudProvider: {{cloud_provider}}
cloudConfig: {{ kube_config_dir }}/cloud_config
{% elif cloud_provider is defined and cloud_provider in ["external"] %}
cloudConfig: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_proxy_mode == 'ipvs' %}
kubeProxy:
config:
{% if kube_version is version('v1.10', '<') %}
featureGates: SupportIPVSProxyMode=true
{% endif %}
{% if kube_version is version('v1.10', '>=') %}
featureGates:
SupportIPVSProxyMode: true
{% endif %}
mode: ipvs
{% endif %}
{% if kube_proxy_nodeport_addresses %}
nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
{% endif %}
resourceContainer: ""
authorizationModes:
{% for mode in authorization_modes %}
- {{ mode }}
{% endfor %}
selfHosted: false
apiServerExtraArgs:
bind-address: {{ kube_apiserver_bind_address }}
{% if kube_apiserver_insecure_port|string != "0" %}
insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
{% endif %}
insecure-port: "{{ kube_apiserver_insecure_port }}"
{% if kube_version is version('v1.10', '<') %}
admission-control: {{ kube_apiserver_admission_control | join(',') }}
{% else %}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
enable-admission-plugins: {{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
disable-admission-plugins: {{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
{% endif %}
apiserver-count: "{{ kube_apiserver_count }}"
{% if kube_version is version('v1.9', '>=') %}
endpoint-reconciler-type: lease
{% endif %}
{% if etcd_events_cluster_enabled %}
etcd-servers-overrides: "/events#{{ etcd_events_access_addresses }}"
{% endif %}
service-node-port-range: {{ kube_apiserver_node_port_range }}
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
profiling: "{{ kube_profiling }}"
request-timeout: "{{ kube_apiserver_request_timeout }}"
repair-malformed-updates: "false"
enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
anonymous-auth: "{{ kube_api_anonymous_auth }}"
{% endif %}
{% if kube_basic_auth|default(true) %}
basic-auth-file: {{ kube_users_dir }}/known_users.csv
{% endif %}
{% if kube_token_auth|default(true) %}
token-auth-file: {{ kube_token_dir }}/known_tokens.csv
{% endif %}
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
oidc-issuer-url: {{ kube_oidc_url }}
oidc-client-id: {{ kube_oidc_client_id }}
{% if kube_oidc_ca_file is defined %}
oidc-ca-file: {{ kube_oidc_ca_file }}
{% endif %}
{% if kube_oidc_username_claim is defined %}
oidc-username-claim: {{ kube_oidc_username_claim }}
{% endif %}
{% if kube_oidc_groups_claim is defined %}
oidc-groups-claim: {{ kube_oidc_groups_claim }}
{% endif %}
{% if kube_oidc_username_prefix is defined %}
oidc-username-prefix: "{{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
oidc-groups-prefix: "{{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
authentication-token-webhook-config-file: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if kube_encrypt_secret_data %}
experimental-encryption-provider-config: {{ kube_cert_dir }}/secrets_encryption.yaml
{% endif %}
storage-backend: {{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config is defined %}
runtime-config: {{ kube_api_runtime_config | join(',') }}
{% endif %}
allow-privileged: "true"
{% for key in kube_kubeadm_apiserver_extra_args %}
{{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
{% endfor %}
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
configure-cloud-routes: "true"
{% endif %}
controllerManagerExtraArgs:
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
node-monitor-period: {{ kube_controller_node_monitor_period }}
pod-eviction-timeout: {{ kube_controller_pod_eviction_timeout }}
node-cidr-mask-size: "{{ kube_network_node_prefix }}"
profiling: "{{ kube_profiling }}"
terminated-pod-gc-threshold: "{{ kube_controller_terminated_pod_gc_threshold }}"
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% for key in kube_kubeadm_controller_extra_args %}
{{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
{% endfor %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
controllerManagerExtraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
- name: openstackcacert
hostPath: "{{ kube_config_dir }}/openstack-cacert.pem"
mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
- name: cloud-config
hostPath: {{ kube_config_dir }}/cloud_config
mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% endif %}
schedulerExtraArgs:
profiling: "{{ kube_profiling }}"
{% if kube_feature_gates %}
feature-gates: {{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
{% for key in kube_kubeadm_scheduler_extra_args %}
{{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
{% endfor %}
{% endif %}
{% if kube_basic_auth|default(true) or kube_token_auth|default(true) or kube_webhook_token_auth|default(false) or ssl_ca_dirs|length %}
apiServerExtraVolumes:
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws", "external"] %}
- name: cloud-config
hostPath: {{ kube_config_dir }}/cloud_config
mountPath: {{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_basic_auth|default(true) %}
- name: basic-auth-config
hostPath: {{ kube_users_dir }}
mountPath: {{ kube_users_dir }}
{% endif %}
{% if kube_token_auth|default(true) %}
- name: token-auth-config
hostPath: {{ kube_token_dir }}
mountPath: {{ kube_token_dir }}
{% endif %}
{% if kube_webhook_token_auth|default(false) %}
- name: webhook-token-auth-config
hostPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
mountPath: {{ kube_config_dir }}/webhook-token-auth-config.yaml
{% endif %}
{% if ssl_ca_dirs|length %}
{% for dir in ssl_ca_dirs %}
- name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
hostPath: {{ dir }}
mountPath: {{ dir }}
writable: false
{% endfor %}
{% endif %}
{% endif %}
apiServerCertSANs:
{% for san in apiserver_sans %}
- {{ san }}
{% endfor %}
certificatesDir: {{ kube_cert_dir }}
imageRepository: {{ kube_image_repo }}
unifiedControlPlaneImage: ""
{% if kube_override_hostname|default('') %}
nodeName: {{ kube_override_hostname }}
{% endif %}
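Note: this deletes the old kubeadm.k8s.io/v1alpha1 MasterConfiguration template entirely; kubeadm v1.14 no longer accepts it, and the newer config groups split the single MasterConfiguration document into separate kinds. A minimal sketch of the equivalent v1beta1 header, with illustrative values rather than values copied from the remaining templates:
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.0.2.10   # assumed address
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.14.0
imageRepository: gcr.io/google-containers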
@@ -14,9 +14,9 @@ etcd:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/ca.pem
certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
@@ -221,10 +221,12 @@ nodeRegistration:
{% if kube_override_hostname|default('') %}
name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
{% if inventory_hostname not in groups['kube-node'] %}
taints:
- effect: NoSchedule
key: node.kubernetes.io/master
{% else %}
taints: {}
{% endif %}
{% if container_manager == 'crio' %}
criSocket: /var/run/crio/crio.sock
@@ -7,10 +7,12 @@ nodeRegistration:
{% if kube_override_hostname|default('') %}
name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
{% if inventory_hostname not in groups['kube-node'] %}
taints:
- effect: NoSchedule
key: node.kubernetes.io/master
{% else %}
taints: {}
{% endif %}
{% if container_manager == 'crio' %}
criSocket: /var/run/crio/crio.sock
@@ -29,9 +31,9 @@ etcd:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/ca.pem
certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
@@ -7,10 +7,12 @@ nodeRegistration:
{% if kube_override_hostname|default('') %}
name: {{ kube_override_hostname }}
{% endif %}
{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
{% if inventory_hostname not in groups['kube-node'] %}
taints:
- effect: NoSchedule
key: node.kubernetes.io/master
{% else %}
taints: []
{% endif %}
{% if container_manager == 'crio' %}
criSocket: /var/run/crio/crio.sock
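Note: with this change a host that is only in kube-master keeps the NoSchedule taint, while a master that is also in kube-node renders an empty taint list. A sketch of the two rendered outcomes under the v1beta1 template; the hostnames are invented for illustration:
# host "master-1", in kube-master only:
nodeRegistration:
  name: master-1
  taints:
  - effect: NoSchedule
    key: node.kubernetes.io/master
# host "master-2", in both kube-master and kube-node:
nodeRegistration:
  name: master-2
  taints: []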
@@ -29,9 +31,9 @@ etcd:
{% for endpoint in etcd_access_addresses.split(',') %}
- {{ endpoint }}
{% endfor %}
caFile: {{ etcd_cert_dir }}/ca.pem
certFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
keyFile: {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
caFile: {{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}
certFile: {{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}
keyFile: {{ etcd_cert_dir }}/{{ kube_etcd_key_file }}
networking:
dnsDomain: {{ dns_domain }}
serviceSubnet: {{ kube_service_addresses }}
@@ -0,0 +1,26 @@
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
bootstrapToken:
{% if kubeadm_config_api_fqdn is defined %}
apiServerEndpoint: {{ kubeadm_config_api_fqdn }}:{{ loadbalancer_apiserver.port | default(kube_apiserver_port) }}
{% else %}
apiServerEndpoint: {{ kubeadm_discovery_address | replace("https://", "")}}
{% endif %}
token: {{ kubeadm_token }}
unsafeSkipCAVerification: true
timeout: {{ discovery_timeout }}
tlsBootstrapToken: {{ kubeadm_token }}
controlPlane:
localAPIEndpoint:
advertiseAddress: {{ kube_apiserver_address }}
bindPort: {{ kube_apiserver_port }}
nodeRegistration:
name: {{ inventory_hostname }}
{% if container_manager == 'crio' %}
criSocket: /var/run/crio/crio.sock
{% elif container_manager == 'rkt' %}
criSocket: /var/run/rkt.sock
{% else %}
criSocket: /var/run/dockershim.sock
{% endif %}
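Note: a sketch of how this new kubeadm-controlplane template might render for a secondary master running docker; the endpoint, token, address and hostname are invented for illustration:
apiVersion: kubeadm.k8s.io/v1beta1
kind: JoinConfiguration
discovery:
  bootstrapToken:
    apiServerEndpoint: 192.0.2.10:6443
    token: abcdef.0123456789abcdef
    unsafeSkipCAVerification: true
  timeout: 5m0s
  tlsBootstrapToken: abcdef.0123456789abcdef
controlPlane:
  localAPIEndpoint:
    advertiseAddress: 192.0.2.11
    bindPort: 6443
nodeRegistration:
  name: master-2
  criSocket: /var/run/dockershim.sock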
@@ -137,6 +137,7 @@
run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}"
changed_when: false
failed_when: false
when:
- kube_network_plugin == 'calico'
@@ -12,7 +12,7 @@ is_atomic: false
disable_swap: true

## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.13.5
kube_version: v1.14.0

## Kube Proxy mode One of ['iptables','ipvs']
kube_proxy_mode: ipvs
@@ -334,6 +334,9 @@ kube_feature_gates: |-
{{ feature_gate_v1_12 }}
{%- endif %}

# Enable kubeadm experimental control plane
kubeadm_control_plane: false

# Local volume provisioner storage classes
# Levarages Ansibles string to Python datatype casting. Otherwise the dict_key isn't substituted
# see https://github.com/ansible/ansible/issues/17324
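Note: the new mode is off by default; an operator would opt in from inventory group_vars. A minimal sketch, where the file path is the conventional kubespray location and is shown only as an assumption:
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml (hypothetical)
kubeadm_control_plane: true   # join secondary masters via kubeadm's experimental control plane flow
kube_version: v1.14.0         # the --experimental-upload-certs path requires kubeadm >= v1.14.0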
@@ -382,7 +385,7 @@ no_proxy: >-
{%- endif -%}
{%- for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }},
{%- if item != hostvars[item].get('ansible_hostname', "") -%}
{%- if item != hostvars[item].get('ansible_hostname', '') -%}
{{ hostvars[item]['ansible_hostname'] }},
{{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }},
{%- endif -%}
@@ -61,3 +61,7 @@ calico_baremetal_nodename: "{{ kube_override_hostname | default(inventory_hostna

### do not enable this, this is detected in scope of tasks, this is just a default value
calico_upgrade_needed: false

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
@@ -1,15 +1,14 @@
---
- name: restart calico-node
- name: reset_calico_cni
command: /bin/true
notify:
- Calico | reload systemd
- Calico | reload calico-node
- delete 10-calico.conflist
- delete calico-node containers

- name: Calico | reload systemd
shell: systemctl daemon-reload
- name: delete 10-calico.conflist
file:
path: /etc/calico/10-calico.conflist
state: absent

- name: Calico | reload calico-node
service:
name: calico-node
state: restarted
sleep: 10
- name: delete calico-node containers
shell: "docker ps -af name=k8s_POD_calico-node* -q | xargs --no-run-if-empty docker rm -f"
@@ -10,3 +10,7 @@ calico_rr_memory_limit: 1000M
calico_rr_cpu_limit: 300m
calico_rr_memory_requests: 128M
calico_rr_cpu_requests: 150m

kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
@@ -22,9 +22,9 @@
state: hard
force: yes
with_items:
- {s: "ca.pem", d: "ca_cert.crt"}
- {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
- {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}

- name: Calico-rr | Create dir for logs
file:
@@ -13,8 +13,7 @@
shell: "{{ bin_dir }}/calicoctl version | grep 'Cluster Version:' | awk '{ print $3}'"
register: calico_version_on_server
run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}"
async: 10
async: 20
poll: 3
changed_when: false
@@ -4,6 +4,8 @@
src: "cni-calico.conflist.j2"
dest: "/etc/cni/net.d/{% if calico_version is version('v3.3.0', '>=') %}calico.conflist.template{% else %}10-calico.conflist{% endif %}"
owner: kube
register: calico_conflist
notify: reset_calico_cni

- name: Calico | Create calico certs directory
file:
@@ -20,9 +22,9 @@
state: hard
force: yes
with_items:
- {s: "ca.pem", d: "ca_cert.crt"}
- {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
- {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}

- name: Calico | Install calicoctl container script
template:
@@ -13,4 +13,4 @@
register: calico_kubelet_name
delegate_to: "{{ groups['kube-master'][0] }}"
when:
- "cloud_provider is defined"
- "cloud_provider is defined"
@@ -4,6 +4,6 @@ metadata:
spec:
datastoreType: "etcdv2"
etcdEndpoints: "{{ etcd_access_addresses }}"
etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"
@@ -4,6 +4,6 @@ metadata:
spec:
datastoreType: "etcdv3"
etcdEndpoints: "{{ etcd_access_addresses }}"
etcdKeyFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
etcdCertFile: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
etcdCACertFile: "{{ etcd_cert_dir }}/ca.pem"
etcdKeyFile: "{{ etcd_cert_dir }}/{{ kube_etcd_key_file }}"
etcdCertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cert_file }}"
etcdCACertFile: "{{ etcd_cert_dir }}/{{ kube_etcd_cacert_file }}"
@@ -30,3 +30,8 @@ calicoctl_memory_limit: 170M
calicoctl_cpu_limit: 100m
calicoctl_memory_requests: 32M
calicoctl_cpu_requests: 25m

# etcd cert filenames
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
@@ -20,9 +20,9 @@
state: hard
force: yes
with_items:
- {s: "ca.pem", d: "ca_cert.crt"}
- {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
- {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}

- name: Canal | Set Flannel etcd configuration
command: |-
@@ -5,6 +5,9 @@ cilium_disable_ipv4: false

# Etcd SSL dirs
cilium_cert_dir: /etc/cilium/certs
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem

# Cilium Network Policy directory
cilium_policy_dir: /etc/kubernetes/policy
@@ -11,4 +11,4 @@
- name: Kubelet | reload kubelet
service:
name: kubelet
state: restarted
state: restarted
@@ -21,9 +21,9 @@
state: hard
force: yes
with_items:
- {s: "ca.pem", d: "ca_cert.crt"}
- {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
- {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
- {s: "{{ kube_etcd_cacert_file }}", d: "ca_cert.crt"}
- {s: "{{ kube_etcd_cert_file }}", d: "cert.crt"}
- {s: "{{ kube_etcd_key_file }}", d: "key.pem"}

- name: Cilium | Create Cilium node manifests
template:
@@ -6,6 +6,7 @@ cloud_machine_type: "n1-standard-2"
mode: ha

# Deployment settings
kubeadm_control_plane: true
kube_network_plugin: flannel
helm_enabled: true
kubernetes_audit: true