Fix ansible syntax to avoid ansible deprecation warnings (#3512)
* failed
* version_compare
* succeeded
* skipped
* success
* version_compare becomes version since ansible 2.5
* ansible minimal version updated in doc and spec
* last version_compare
parent: bfd4ccbeaa
commit: 7bec169d58
75 changed files with 153 additions and 153 deletions
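Every change below follows the same pattern: Ansible 2.5 deprecates piping a result or version string through the Jinja2 filters `version_compare`, `succeeded`, `failed`, `skipped`, and `success`, in favor of the Jinja2 test syntax (`is version`, `is succeeded`, and so on). A minimal before/after sketch of the idea (a hypothetical task, not part of this diff):

    # Deprecated filter form, warns on Ansible >= 2.5:
    - debug: msg="control machine is new enough"
      when: ansible_version.full | version_compare('2.5.0', '>=')

    # Test form used throughout this commit:
    - debug: msg="control machine is new enough"
      when: ansible_version.full is version('2.5.0', '>=')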
@@ -134,7 +134,7 @@ plugins can be deployed for a given single cluster.
 Requirements
 ------------
 
-- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
+- **Ansible v2.5 (or newer) and python-netaddr is installed on the machine
   that will run Ansible commands**
 - **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
 - The target servers must have **access to the Internet** in order to pull docker images. Otherwise, additional configuration is required (See [Offline Environment](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/downloads.md#offline-environment))
@@ -20,7 +20,7 @@ BuildRequires: python2-setuptools
 BuildRequires: python-d2to1
 BuildRequires: python2-pbr
 
-Requires: ansible >= 2.4.0
+Requires: ansible >= 2.5.0
 Requires: python-jinja2 >= 2.10
 Requires: python-netaddr
 Requires: python-pbr
@@ -45,7 +45,7 @@
 docker requires a minimum kernel version of
 {{ docker_kernel_min_version }} on
 {{ ansible_distribution }}-{{ ansible_distribution_version }}
-when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel|version_compare(docker_kernel_min_version, "<"))
+when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (ansible_kernel is version(docker_kernel_min_version, "<"))
 tags:
 - facts
 
@@ -58,7 +58,7 @@
 url: "{{docker_repo_key_info.url}}"
 state: present
 register: keyserver_task_result
-until: keyserver_task_result|succeeded
+until: keyserver_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | d(3) }}"
 with_items: "{{ docker_repo_key_info.repo_keys }}"
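Retry loops change the same way: `until: result|succeeded` becomes `until: result is succeeded`. A self-contained sketch of the retry pattern these tasks use (hypothetical package name):

    - name: Install a package, retrying transient failures
      apt:
        name: example-package   # hypothetical
        state: present
      register: install_result
      until: install_result is succeeded
      retries: 4
      delay: 3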
@@ -79,7 +79,7 @@
 url: "{{dockerproject_repo_key_info.url}}"
 state: present
 register: keyserver_task_result
-until: keyserver_task_result|succeeded
+until: keyserver_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | d(3) }}"
 with_items: "{{ dockerproject_repo_key_info.repo_keys }}"
@@ -134,7 +134,7 @@
 state: present
 update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
 register: docker_task_result
-until: docker_task_result|succeeded
+until: docker_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | d(3) }}"
 with_items: "{{ docker_package_info.pkgs }}"
@@ -145,7 +145,7 @@
 - name: get available packages on Ubuntu
 command: apt-cache policy docker-ce
 when:
-- docker_task_result|failed
+- docker_task_result is failed
 - ansible_distribution == 'Ubuntu'
 register: available_packages
 
@@ -153,7 +153,7 @@
 fail:
 msg: "{{available_packages}}"
 when:
-- docker_task_result|failed
+- docker_task_result is failed
 - ansible_distribution == 'Ubuntu'
 
 # This is required to ensure any apt upgrade will not break kubernetes
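The same substitution applies whenever a later task inspects a registered result: `docker_task_result|failed` becomes `docker_task_result is failed`. A sketch of the guard pattern:

    - name: Show diagnostics only if the install step failed
      debug:
        var: docker_task_result
      when: docker_task_result is failed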
@@ -185,7 +185,7 @@
 when: >
 dns_mode != 'none' and
 resolvconf_mode == 'docker_dns' and
-installed_docker_version.stdout|version_compare('1.12', '<')
+installed_docker_version.stdout is version('1.12', '<')
 
 - name: Set docker systemd config
 import_tasks: systemd.yml
@@ -21,7 +21,7 @@ Environment=GOTRACEBACK=crash
 ExecReload=/bin/kill -s HUP $MAINPID
 Delegate=yes
 KillMode=process
-ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout|version_compare('17.03', '<') %} daemon{% else %}d{% endif %} \
+ExecStart={{ docker_bin_dir }}/docker{% if installed_docker_version.stdout is version('17.03', '<') %} daemon{% else %}d{% endif %} \
 {% if ansible_os_family == "Suse" %}
 --containerd /run/containerd/containerd.sock --add-runtime oci=/usr/bin/docker-runc \
 {% endif %}
@@ -41,7 +41,7 @@ docker_pkgs:
 docker_package_info:
 pkg_mgr: yum
 pkgs: |-
-{%- if docker_version | version_compare('17.04', '<') -%}
+{%- if docker_version is version('17.04', '<') -%}
 {{ docker_pkgs_use_docker_ce }}
 {%- else -%}
 {{ docker_pkgs }}
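Inside Jinja2 templates the rewrite is identical, because `version_compare`/`version` is usable as either a filter or a test. The test takes the same arguments, including `strict=True` for strict version parsing (used later in this diff for the kubedns check). A standalone sketch with a hypothetical variable value:

    - debug:
        msg: "pre-17.04 docker package layout"
      when: docker_version is version('17.04', '<', strict=True)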
@@ -20,7 +20,7 @@
 deb: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
 state: present
 register: rkt_task_result
-until: rkt_task_result|succeeded
+until: rkt_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when: ansible_os_family == "Debian"
@@ -36,7 +36,7 @@
 pkg: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
 state: present
 register: rkt_task_result
-until: rkt_task_result|succeeded
+until: rkt_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -48,7 +48,7 @@
 name: "{{ rkt_download_url }}/{{ rkt_pkg_name }}"
 state: present
 register: rkt_task_result
-until: rkt_task_result|succeeded
+until: rkt_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when: ansible_os_family == "Suse"
@@ -31,7 +31,7 @@ spec:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 serviceAccountName: dnsmasq
@@ -21,7 +21,7 @@ spec:
 kubernetes.io/cluster-service: "true"
 kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 tolerations:
@@ -15,7 +15,7 @@
 - name: container_download | Download containers if pull is required or told to always pull (delegate)
 command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
 register: pull_task_result
-until: pull_task_result|succeeded
+until: pull_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -30,7 +30,7 @@
 - name: container_download | Download containers if pull is required or told to always pull (all nodes)
 command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
 register: pull_task_result
-until: pull_task_result|succeeded
+until: pull_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -99,7 +99,7 @@
 delegate_facts: no
 become: false
 register: get_task
-until: get_task|succeeded
+until: get_task is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -36,7 +36,7 @@
 when:
 - dns_mode in ['kubedns', 'dnsmasq_kubedns']
 - inventory_hostname == groups['kube-master'][0]
-- rbac_enabled and kubedns_version|version_compare("1.11.0", "<", strict=True)
+- rbac_enabled and kubedns_version is version("1.11.0", "<", strict=True)
 tags:
 - dnsmasq
 - kubedns
@@ -52,9 +52,9 @@
 when:
 - dns_mode != 'none'
 - inventory_hostname == groups['kube-master'][0]
-- not item|skipped
+- not item is skipped
 register: resource_result
-until: resource_result|succeeded
+until: resource_result is succeeded
 retries: 4
 delay: 5
 tags:
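When iterating over the registered results of an earlier loop, each `item` carries its own status, so the per-item guard becomes `not item is skipped`. Jinja2's `is` binds tighter than `not`, so this parses as `not (item is skipped)`. A sketch of the shape:

    - name: Report each manifest that was actually rendered
      debug:
        msg: "{{ item.item }} applied"
      with_items: "{{ manifests.results }}"
      when: not item is skipped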
@@ -68,4 +68,4 @@
 filename: "{{kube_config_dir}}/{{item.item.file}}"
 state: "latest"
 with_items: "{{ manifests.results }}"
-when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+when: inventory_hostname == groups['kube-master'][0] and not item is skipped
@@ -24,7 +24,7 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 serviceAccountName: coredns
@@ -140,7 +140,7 @@ spec:
 labels:
 k8s-app: kubernetes-dashboard
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 containers:
@@ -28,7 +28,7 @@ spec:
 labels:
 k8s-app: kubedns-autoscaler
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
@@ -27,7 +27,7 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
@@ -12,7 +12,7 @@ spec:
 labels:
 app: netchecker-agent
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 tolerations:
@@ -16,10 +16,10 @@ spec:
 # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
 nodeSelector:
 beta.kubernetes.io/os: linux
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 dnsPolicy: ClusterFirstWithHostNet
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 tolerations:
@@ -11,7 +11,7 @@ spec:
 app: netchecker-server
 namespace: {{ netcheck_namespace }}
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if netcheck_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 containers:
@@ -49,7 +49,7 @@
 with_items: "{{ psp_manifests.results }}"
 when:
 - inventory_hostname == groups['kube-master'][0]
-- not item|skipped
+- not item is skipped
 
 - name: Kubernetes Apps | Add ClusterRoleBinding to admit nodes
 template:
@@ -130,8 +130,8 @@
 - rbac_enabled
 - cloud_provider is defined
 - cloud_provider == 'vsphere'
-- kube_version | version_compare('v1.9.0', '>=')
-- kube_version | version_compare('v1.9.3', '<=')
+- kube_version is version('v1.9.0', '>=')
+- kube_version is version('v1.9.3', '<=')
 - inventory_hostname == groups['kube-master'][0]
 tags: vsphere
 
@@ -146,8 +146,8 @@
 - cloud_provider == 'vsphere'
 - vsphere_cloud_provider.rc is defined
 - vsphere_cloud_provider.rc != 0
-- kube_version | version_compare('v1.9.0', '>=')
-- kube_version | version_compare('v1.9.3', '<=')
+- kube_version is version('v1.9.0', '>=')
+- kube_version is version('v1.9.3', '<=')
 - inventory_hostname == groups['kube-master'][0]
 tags: vsphere
 
@@ -164,8 +164,8 @@
 - cloud_provider == 'vsphere'
 - vsphere_cloud_provider.rc is defined
 - vsphere_cloud_provider.rc != 0
-- kube_version | version_compare('v1.9.0', '>=')
-- kube_version | version_compare('v1.9.3', '<=')
+- kube_version is version('v1.9.0', '>=')
+- kube_version is version('v1.9.3', '<=')
 - inventory_hostname == groups['kube-master'][0]
 tags: vsphere
 
@@ -178,7 +178,7 @@
 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
 copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
 when:
-- kube_version|version_compare('v1.11.1', '>=')
+- kube_version is version('v1.11.1', '>=')
 - inventory_hostname == groups['kube-master'][0]
 
 - name: PriorityClass | Create k8s-cluster-critical
@@ -189,5 +189,5 @@
 filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
 state: latest
 when:
-- kube_version|version_compare('v1.11.1', '>=')
+- kube_version is version('v1.11.1', '>=')
 - inventory_hostname == groups['kube-master'][0]
@@ -19,7 +19,7 @@ spec:
 app: cephfs-provisioner
 version: {{ cephfs_provisioner_image_tag }}
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if cephfs_provisioner_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 serviceAccount: cephfs-provisioner
@@ -18,7 +18,7 @@ spec:
 k8s-app: local-volume-provisioner
 version: {{ local_volume_provisioner_image_tag }}
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if local_volume_provisioner_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 serviceAccountName: local-volume-provisioner
@@ -45,7 +45,7 @@
 --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}
 {% if rbac_enabled %} --service-account=tiller{% endif %}
 {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
+{% if kube_version is version('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
 {% if tiller_override is defined and tiller_override != "" %} --override {{ tiller_override }}{% endif %}
 {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
 {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
@@ -65,7 +65,7 @@
 {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
 {% if rbac_enabled %} --service-account=tiller{% endif %}
 {% if tiller_node_selectors is defined %} --node-selectors {{ tiller_node_selectors }}{% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
+{% if kube_version is version('v1.11.1', '>=') %} --override spec.template.spec.priorityClassName={% if tiller_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{% endif %}
 {% if tiller_override is defined and tiller_override != "" %} --override {{ tiller_override }}{% endif %}
 {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
 {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
@@ -74,7 +74,7 @@
 | {{bin_dir}}/kubectl apply -f -
 changed_when: false
 when:
-- (tiller_override is defined and tiller_override != "") or (kube_version|version_compare('v1.11.1', '>='))
+- (tiller_override is defined and tiller_override != "") or (kube_version is version('v1.11.1', '>='))
 - inventory_hostname == groups['kube-master'][0]
 environment: "{{proxy_env}}"
 
@@ -22,7 +22,7 @@ spec:
 release: cert-manager
 annotations:
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if cert_manager_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 serviceAccountName: cert-manager
@@ -19,7 +19,7 @@ spec:
 app.kubernetes.io/name: default-backend
 app.kubernetes.io/part-of: ingress-nginx
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 terminationGracePeriodSeconds: 60
@@ -29,7 +29,7 @@ spec:
 nodeSelector:
 {{ ingress_nginx_nodeselector | to_nice_yaml }}
 {%- endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if ingress_nginx_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 containers:
@@ -10,7 +10,7 @@
 with_items:
 - "{{ calico_node_manifests.results }}"
 when:
-- inventory_hostname == groups['kube-master'][0] and not item|skipped
+- inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: "calico upgrade complete"
 shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
@@ -8,4 +8,4 @@
 filename: "{{kube_config_dir}}/{{item.item.file}}"
 state: "latest"
 with_items: "{{ canal_manifests.results }}"
-when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+when: inventory_hostname == groups['kube-master'][0] and not item is skipped
@@ -8,7 +8,7 @@
 filename: "{{kube_config_dir}}/{{item.item.file}}"
 state: "latest"
 with_items: "{{ cilium_node_manifests.results }}"
-when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Cilium | Wait for pods to run
 command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
@@ -8,7 +8,7 @@
 filename: "{{kube_config_dir}}/{{item.item.file}}"
 state: "latest"
 with_items: "{{ flannel_node_manifests.results }}"
-when: inventory_hostname == groups['kube-master'][0] and not item|skipped
+when: inventory_hostname == groups['kube-master'][0] and not item is skipped
 
 - name: Flannel | Wait for flannel subnet.env file presence
 wait_for:
@@ -34,4 +34,4 @@
 - "{{ calico_kube_manifests.results }}"
 when:
 - inventory_hostname == groups['kube-master'][0]
-- not item|skipped
+- not item is skipped
@@ -29,7 +29,7 @@ spec:
 tolerations:
 - effect: NoSchedule
 operator: Exists
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 containers:
@@ -21,7 +21,7 @@ spec:
 kubernetes.io/cluster-service: "true"
 version: v{{ registry_proxy_image_tag }}
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if registry_namespace == 'kube-system' %}system-node-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 serviceAccountName: registry-proxy
@@ -22,7 +22,7 @@ spec:
 version: v{{ registry_image_tag }}
 kubernetes.io/cluster-service: "true"
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: {% if registry_namespace == 'kube-system' %}system-cluster-critical{% else %}k8s-cluster-critical{% endif %}{{''}}
 {% endif %}
 serviceAccountName: registry
@@ -34,19 +34,19 @@
 - name: sets kubeadm api version to v1alpha1
 set_fact:
 kubeadmConfig_api_version: v1alpha1
-when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+when: kubeadm_output.stdout is version('v1.11.0', '<')
 
 - name: sets kubeadm api version to v1alpha2
 set_fact:
 kubeadmConfig_api_version: v1alpha2
 when:
-- kubeadm_output.stdout|version_compare('v1.11.0', '>=')
-- kubeadm_output.stdout|version_compare('v1.12.0', '<')
+- kubeadm_output.stdout is version('v1.11.0', '>=')
+- kubeadm_output.stdout is version('v1.12.0', '<')
 
 - name: sets kubeadm api version to v1alpha3
 set_fact:
 kubeadmConfig_api_version: v1alpha3
-when: kubeadm_output.stdout|version_compare('v1.12.0', '>=')
+when: kubeadm_output.stdout is version('v1.12.0', '>=')
 
 - name: Create kubeadm client config
 template:
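Because the entries of a `when:` list are implicitly ANDed, bounding a version to a half-open range is just two `version` tests; the three tasks above partition the kubeadm version line into `< v1.11.0`, `[v1.11.0, v1.12.0)`, and `>= v1.12.0`. A condensed sketch of the middle case:

    when:
      - kubeadm_output.stdout is version('v1.11.0', '>=')
      - kubeadm_output.stdout is version('v1.12.0', '<')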
@@ -80,7 +80,7 @@ kube_apiserver_admission_control:
 - ServiceAccount
 - DefaultStorageClass
 - >-
-{%- if kube_version | version_compare('v1.9', '<') -%}
+{%- if kube_version is version('v1.9', '<') -%}
 GenericAdmissionWebhook
 {%- else -%}
 MutatingAdmissionWebhook,ValidatingAdmissionWebhook
@@ -84,19 +84,19 @@
 - name: sets kubeadm api version to v1alpha1
 set_fact:
 kubeadmConfig_api_version: v1alpha1
-when: kubeadm_output.stdout|version_compare('v1.11.0', '<')
+when: kubeadm_output.stdout is version('v1.11.0', '<')
 
 - name: sets kubeadm api version to v1alpha2
 set_fact:
 kubeadmConfig_api_version: v1alpha2
 when:
-- kubeadm_output.stdout|version_compare('v1.11.0', '>=')
-- kubeadm_output.stdout|version_compare('v1.12.0', '<')
+- kubeadm_output.stdout is version('v1.11.0', '>=')
+- kubeadm_output.stdout is version('v1.12.0', '<')
 
 - name: sets kubeadm api version to v1alpha3
 set_fact:
 kubeadmConfig_api_version: v1alpha3
-when: kubeadm_output.stdout|version_compare('v1.12.0', '>=')
+when: kubeadm_output.stdout is version('v1.12.0', '>=')
 
 # Nginx LB(default), If kubeadm_config_api_fqdn is defined, use other LB by kubeadm controlPlaneEndpoint.
 - name: set kubeadm_config_api_fqdn define
@@ -26,10 +26,10 @@ cloudProvider: {{ cloud_provider }}
 {% if kube_proxy_mode == 'ipvs' %}
 kubeProxy:
 config:
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 featureGates: SupportIPVSProxyMode=true
 {% endif %}
-{% if kube_version | version_compare('v1.10', '>=') %}
+{% if kube_version is version('v1.10', '>=') %}
 featureGates:
 SupportIPVSProxyMode: true
 {% endif %}
@@ -49,7 +49,7 @@ apiServerExtraArgs:
 insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
 insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -60,7 +60,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
 apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
 endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
@@ -72,7 +72,7 @@ apiServerExtraArgs:
 request-timeout: "{{ kube_apiserver_request_timeout }}"
 repair-malformed-updates: "false"
 enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
 anonymous-auth: "{{ kube_api_anonymous_auth }}"
 {% endif %}
 {% if kube_basic_auth|default(true) %}
@@ -41,7 +41,7 @@ apiServerExtraArgs:
 insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
 insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -52,7 +52,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
 apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
 endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
@@ -64,7 +64,7 @@ apiServerExtraArgs:
 request-timeout: "{{ kube_apiserver_request_timeout }}"
 repair-malformed-updates: "false"
 enable-aggregator-routing: "{{ kube_api_aggregator_routing }}"
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
 anonymous-auth: "{{ kube_api_anonymous_auth }}"
 {% endif %}
 {% if kube_basic_auth|default(true) %}
@@ -44,7 +44,7 @@ apiServerExtraArgs:
 insecure-bind-address: {{ kube_apiserver_insecure_bind_address }}
 {% endif %}
 insecure-port: "{{ kube_apiserver_insecure_port }}"
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 admission-control: {{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -55,7 +55,7 @@ apiServerExtraArgs:
 {% endif %}
 {% endif %}
 apiserver-count: "{{ kube_apiserver_count }}"
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
 endpoint-reconciler-type: lease
 {% endif %}
 {% if etcd_events_cluster_enabled %}
@@ -11,10 +11,10 @@ metadata:
 kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}"
 spec:
 hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -43,7 +43,7 @@ spec:
 {% if etcd_events_cluster_enabled %}
 - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses_semicolon }}
 {% endif %}
-{% if kube_version | version_compare('v1.9', '<') %}
+{% if kube_version is version('v1.9', '<') %}
 - --etcd-quorum-read=true
 {% endif %}
 - --etcd-cafile={{ etcd_cert_dir }}/ca.pem
@@ -54,10 +54,10 @@ spec:
 {% endif %}
 - --bind-address={{ kube_apiserver_bind_address }}
 - --apiserver-count={{ kube_apiserver_count }}
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
 - --endpoint-reconciler-type=lease
 {% endif %}
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 - --admission-control={{ kube_apiserver_admission_control | join(',') }}
 {% else %}
 {% if kube_apiserver_enable_admission_plugins|length > 0 %}
@@ -114,7 +114,7 @@ spec:
 {% endfor %}
 {% endif %}
 {% if enable_network_policy %}
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 - --runtime-config=extensions/v1beta1/networkpolicies=true
 {% endif %}
 {% endif %}
@@ -124,7 +124,7 @@ spec:
 - --cloud-provider={{ cloud_provider }}
 - --cloud-config={{ kube_config_dir }}/cloud_config
 {% endif %}
-{% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
+{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
 - --anonymous-auth={{ kube_api_anonymous_auth }}
 {% endif %}
 {% if authorization_modes %}
@@ -136,7 +136,7 @@ spec:
 {% if kube_feature_gates %}
 - --feature-gates={{ kube_feature_gates|join(',') }}
 {% endif %}
-{% if kube_version | version_compare('v1.9', '>=') %}
+{% if kube_version is version('v1.9', '>=') %}
 - --requestheader-client-ca-file={{ kube_cert_dir }}/{{ kube_front_proxy_ca }}
 {# FIXME(mattymo): Vault certs do not work with front-proxy-client #}
 {% if cert_management == "vault" %}
@@ -10,10 +10,10 @@ metadata:
 kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}"
 spec:
 hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -9,10 +9,10 @@ metadata:
 kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}"
 spec:
 hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 dnsPolicy: ClusterFirst
 {% endif %}
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -1,17 +1,17 @@
 [Global]
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 zone={{ aws_zone|default("") }}
 vpc={{ aws_vpc|default("") }}
 subnetId={{ aws_subnet_id|default("") }}
 routeTableId={{ aws_route_table_id|default("") }}
-{% if kube_version | version_compare('v1.10', '>=') %}
+{% if kube_version is version('v1.10', '>=') %}
 roleArn={{ aws_role_arn|default("") }}
 {% endif %}
 kubernetesClusterTag={{ aws_kubernetes_cluster_tag|default("") }}
 kubernetesClusterId={{ aws_kubernetes_cluster_id|default("") }}
 disableSecurityGroupIngress={{ "true" if aws_disable_security_group_ingress|default(False) else "false" }}
 disableStrictZoneCheck={{ "true" if aws_disable_strict_zone_check|default(False) else "false" }}
-{% if kube_version | version_compare('v1.7', '>=') %}
+{% if kube_version is version('v1.7', '>=') %}
 elbSecurityGroup={{ aws_elb_security_group|default("") }}
 {% endif %}
 {% endif %}
@@ -17,7 +17,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {# start kubeadm specific settings #}
 --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
 --kubeconfig={{ kube_config_dir }}/kubelet.conf \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --require-kubeconfig \
 {% endif %}
 {% if kubelet_authentication_token_webhook %}
@@ -29,7 +29,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} \
 --client-ca-file={{ kube_cert_dir }}/ca.crt \
 --pod-manifest-path={{ kube_manifest_dir }} \
-{% if kube_version | version_compare('v1.12.0', '<') %}
+{% if kube_version is version('v1.12.0', '<') %}
 --cadvisor-port={{ kube_cadvisor_port }} \
 {% endif %}
 {# end kubeadm specific settings #}
@@ -37,7 +37,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --node-status-update-frequency={{ kubelet_status_update_frequency }} \
 --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
 --max-pods={{ kubelet_max_pods }} \
-{% if container_manager == 'docker' and kube_version | version_compare('v1.12.0', '<') %}
+{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
 --docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
 {% endif %}
 {% if container_manager == 'crio' %}
@@ -46,7 +46,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 --anonymous-auth=false \
 --read-only-port={{ kube_read_only_port }} \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
|
|
@ -12,12 +12,12 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
|
|||
{# Base kubelet args #}
|
||||
{% set kubelet_args_base %}
|
||||
--pod-manifest-path={{ kube_manifest_dir }} \
|
||||
{% if kube_version | version_compare('v1.12.0', '<') %}
|
||||
{% if kube_version is version('v1.12.0', '<') %}
|
||||
--cadvisor-port={{ kube_cadvisor_port }} \
|
||||
{% endif %}
|
||||
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
|
||||
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
|
||||
{% if container_manager == 'docker' and kube_version | version_compare('v1.12.0', '<') %}
|
||||
{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
|
||||
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
|
||||
{% endif %}
|
||||
--client-ca-file={{ kube_cert_dir }}/ca.pem \
|
||||
|
@@ -25,9 +25,9 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
 --anonymous-auth=false \
 --read-only-port={{ kube_read_only_port }} \
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 {# flag got removed with 1.7.0 #}
-{% if kube_version | version_compare('v1.7', '<') %}
+{% if kube_version is version('v1.7', '<') %}
 --enable-cri={{ kubelet_enable_cri }} \
 {% endif %}
 {% if container_manager == 'crio' %}
@@ -37,7 +37,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 --cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
 --cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
 --max-pods={{ kubelet_max_pods }} \
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 --experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
 {% else %}
 --fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
@@ -68,7 +68,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
 
 {# Location of the apiserver #}
-{% if kube_version | version_compare('v1.8', '<') %}
+{% if kube_version is version('v1.8', '<') %}
 {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
 {% else %}
 {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml{% endset %}
@@ -76,7 +76,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 
 {% if standalone_kubelet|bool %}
 {# We are on a master-only host. Make the master unschedulable in this case. #}
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 {# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
 {% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %}
 {% else %}
@@ -9,13 +9,13 @@ metadata:
 kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}"
 spec:
 hostNetwork: true
-{% if kube_version | version_compare('v1.6', '>=') %}
+{% if kube_version is version('v1.6', '>=') %}
 dnsPolicy: ClusterFirst
 {% endif %}
 # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
 nodeSelector:
 beta.kubernetes.io/os: linux
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -56,7 +56,7 @@ spec:
 - --masquerade-all
 {% elif kube_proxy_mode == 'ipvs' %}
 - --masquerade-all
-{% if kube_version | version_compare('v1.10', '<') %}
+{% if kube_version is version('v1.10', '<') %}
 - --feature-gates=SupportIPVSProxyMode=true
 {% endif %}
 - --ipvs-min-sync-period=5s
@@ -10,7 +10,7 @@ spec:
 # When having win nodes in cluster without this patch, this pod cloud try to be created in windows
 nodeSelector:
 beta.kubernetes.io/os: linux
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -4,7 +4,7 @@ password = "{{ vsphere_password }}"
 port = {{ vsphere_vcenter_port }}
 insecure-flag = {{ vsphere_insecure }}
 
-{% if kube_version | version_compare('v1.9.2', '>=') %}
+{% if kube_version is version('v1.9.2', '>=') %}
 datacenters = "{{ vsphere_datacenter }}"
 {% else %}
 datastore = "{{ vsphere_datastore }}"
@@ -19,7 +19,7 @@ vm-name = "{{ vsphere_vm_name }}"
 {% endif %}
 {% endif %}
 
-{% if kube_version | version_compare('v1.9.2', '>=') %}
+{% if kube_version is version('v1.9.2', '>=') %}
 
 [VirtualCenter "{{ vsphere_vcenter_ip }}"]
 
@@ -2,7 +2,7 @@
 - name: Stop if ansible version is too low
 assert:
 that:
-- ansible_version.full|version_compare('2.3.0', '>=')
+- ansible_version.full is version('2.3.0', '>=')
 run_once: yes
 
 - name: Stop if either kube-master, kube-node or etcd is empty
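Note that this assert still enforces 2.3.0 even though the README and spec in this commit raise the documented minimum to 2.5. A version-pinned variant would read (a sketch, not what the commit changes):

    - name: Stop if ansible version is too low
      assert:
        that:
          - ansible_version.full is version('2.5.0', '>=')
      run_once: yes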
@@ -114,7 +114,7 @@
 
 - name: Stop if kernel version is too low
 assert:
-that: ansible_kernel.split('-')[0]|version_compare('4.8', '>=')
+that: ansible_kernel.split('-')[0] is version('4.8', '>=')
 when: kube_network_plugin == 'cilium'
 ignore_errors: "{{ ignore_assert_errors }}"
 
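The kernel check splits on `-` because the fact embeds a distro suffix that would not parse as a version; only the numeric prefix is compared. A sketch with a hypothetical kernel string:

    # ansible_kernel               -> "4.9.0-8-amd64"   (hypothetical value)
    # ansible_kernel.split('-')[0] -> "4.9.0"
    - debug:
        msg: "kernel new enough for cilium"
      when: ansible_kernel.split('-')[0] is version('4.8', '>=')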
@@ -146,7 +146,7 @@
 - name: "Check that calico version is enought for upgrade"
 assert:
 that:
-- calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+- calico_version_on_server.stdout is version('v2.6.5', '>=')
 msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5"
 when:
 - 'calico_version_on_server.stdout is defined'
@@ -4,7 +4,7 @@
 update_cache: yes
 name: '*'
 register: yum_task_result
-until: yum_task_result|succeeded
+until: yum_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -15,7 +15,7 @@
 - name: Expire management cache (YUM) for Updation - Redhat
 shell: yum clean expire-cache
 register: expire_cache_output
-until: expire_cache_output|succeeded
+until: expire_cache_output is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -27,7 +27,7 @@
 - name: Update package management cache (YUM) - Redhat
 shell: yum makecache
 register: make_cache_output
-until: make_cache_output|succeeded
+until: make_cache_output is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -40,7 +40,7 @@
 - name: Update package management cache (zypper) - SUSE
 shell: zypper -n --gpg-auto-import-keys ref
 register: make_cache_output
-until: make_cache_output|succeeded
+until: make_cache_output is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -58,7 +58,7 @@
 - name: Install python-dnf for latest RedHat versions
 command: dnf install -y python-dnf yum
 register: dnf_task_result
-until: dnf_task_result|succeeded
+until: dnf_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
@@ -86,7 +86,7 @@
 name: "{{ item }}"
 state: latest
 register: pkgs_task_result
-until: pkgs_task_result|succeeded
+until: pkgs_task_result is succeeded
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
@@ -296,7 +296,7 @@ feature_gate_v1_12: []
 ## List of key=value pairs that describe feature gates for
 ## the k8s cluster.
 kube_feature_gates: |-
-{%- if kube_version | version_compare('v1.12.0', '<') -%}
+{%- if kube_version is version('v1.12.0', '<') -%}
 {{ feature_gate_v1_11 }}
 {%- else -%}
 {{ feature_gate_v1_12 }}
@@ -63,7 +63,7 @@
 delay: "{{ retry_stagger | random + 3 }}"
 delegate_to: "{{groups['etcd'][0]}}"
 when:
-- calico_version | version_compare("v3.0.0", ">=")
+- calico_version is version("v3.0.0", ">=")
 
 - name: Calico-rr | Configure route reflector (legacy)
 command: |-
@@ -81,7 +81,7 @@
 delay: "{{ retry_stagger | random + 3 }}"
 delegate_to: "{{groups['etcd'][0]}}"
 when:
-- calico_version | version_compare("v3.0.0", "<")
+- calico_version is version("v3.0.0", "<")
 
 - meta: flush_handlers
 
@@ -20,7 +20,7 @@
 - name: "Check that calico version is enough for upgrade"
 assert:
 that:
-- calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
+- calico_version_on_server.stdout is version('v2.6.5', '>=')
 msg: "Your version of calico is not fresh enough for upgrade"
 when: calico_upgrade_enabled
 
@@ -28,8 +28,8 @@
 set_fact:
 calico_upgrade_needed: True
 when:
-- calico_version_on_server.stdout|version_compare('v2.6.5', '>=')
-- calico_version_on_server.stdout|version_compare('v3.0.0', '<')
+- calico_version_on_server.stdout is version('v2.6.5', '>=')
+- calico_version_on_server.stdout is version('v3.0.0', '<')
 
 when:
 - 'calico_version_on_server.stdout is defined'
@@ -102,7 +102,7 @@
 delegate_to: "{{ groups['kube-master'][0] }}"
 when:
 - 'calico_conf.stdout == "0"'
-- calico_version | version_compare("v3.0.0", ">=")
+- calico_version is version("v3.0.0", ">=")
 
 - name: Calico | Configure calico network pool (legacy)
 shell: >
@@ -119,7 +119,7 @@
 delegate_to: "{{ groups['kube-master'][0] }}"
 when:
 - 'calico_conf.stdout == "0"'
-- calico_version | version_compare("v3.0.0", "<")
+- calico_version is version("v3.0.0", "<")
 
 - name: "Determine nodeToNodeMesh needed state"
 set_fact:
@@ -144,19 +144,19 @@
 run_once: true
 delegate_to: "{{ groups['kube-master'][0] }}"
 when:
-- calico_version | version_compare('v3.0.0', '>=')
+- calico_version is version('v3.0.0', '>=')
 
 - name: Calico | Set global as_num (legacy)
 command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}"
 run_once: true
 when:
-- calico_version | version_compare('v3.0.0', '<')
+- calico_version is version('v3.0.0', '<')
 
 - name: Calico | Disable node mesh (legacy)
 command: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off"
 run_once: yes
 when:
-- calico_version | version_compare('v3.0.0', '<')
+- calico_version is version('v3.0.0', '<')
 - nodeToMeshEnabled|default(True)
 
 - name: Calico | Configure node asNumber for per node peering
@@ -176,7 +176,7 @@
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
-- calico_version | version_compare('v3.0.0', '>=')
+- calico_version is version('v3.0.0', '>=')
 - peer_with_router|default(false)
 - inventory_hostname in groups['k8s-cluster']
 - local_as is defined
@@ -199,7 +199,7 @@
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 when:
-- calico_version | version_compare('v3.0.0', '<')
+- calico_version is version('v3.0.0', '<')
 - peer_with_router|default(false)
 - inventory_hostname in groups['k8s-cluster']
 - local_as is defined
@@ -223,7 +223,7 @@
 with_items:
 - "{{ peers|rejectattr('scope','equalto', 'global')|default([]) }}"
 when:
-- calico_version | version_compare('v3.0.0', '>=')
+- calico_version is version('v3.0.0', '>=')
 - peer_with_router|default(false)
 - inventory_hostname in groups['k8s-cluster']
 
@@ -280,7 +280,7 @@
 with_items: "{{ peers|selectattr('scope','equalto', 'global')|default([]) }}"
 run_once: true
 when:
-- calico_version | version_compare('v3.0.0', '<')
+- calico_version is version('v3.0.0', '<')
 - peer_with_router|default(false)
 - inventory_hostname in groups['k8s-cluster']
 
@@ -302,7 +302,7 @@
 with_items:
 - "{{ groups['calico-rr'] | default([]) }}"
 when:
-- calico_version | version_compare('v3.0.0', '>=')
+- calico_version is version('v3.0.0', '>=')
 - peer_with_calico_rr|default(false)
 - inventory_hostname in groups['k8s-cluster']
 - hostvars[item]['cluster_id'] == cluster_id
@@ -322,7 +322,7 @@
 delay: "{{ retry_stagger | random + 3 }}"
 with_items: "{{ groups['calico-rr'] | default([]) }}"
 when:
-- calico_version | version_compare('v3.0.0', '<')
+- calico_version is version('v3.0.0', '<')
 - not calico_upgrade_enabled
 - peer_with_calico_rr|default(false)
 - hostvars[item]['cluster_id'] == cluster_id
@@ -22,7 +22,7 @@ spec:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -18,7 +18,7 @@ spec:
 labels:
 k8s-app: canal-node
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -32,7 +32,7 @@ spec:
 prometheus.io/port: "9090"
 {% endif %}
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 serviceAccountName: cilium
@@ -19,7 +19,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 # The API proxy must run in the host network namespace so that
@@ -18,7 +18,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -17,7 +17,7 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -17,7 +17,7 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -19,7 +19,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 # The netmaster must run in the host network namespace so that
@@ -23,7 +23,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -20,7 +20,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 hostNetwork: true
@@ -55,7 +55,7 @@ spec:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 serviceAccountName: flannel
@@ -63,7 +63,7 @@ spec:
 annotations:
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-cluster-critical
 {% endif %}
 serviceAccountName: kube-router
@@ -118,7 +118,7 @@ items:
 # Mark pod as critical for rescheduling (Will have no effect starting with kubernetes 1.12)
 scheduler.alpha.kubernetes.io/critical-pod: ''
 spec:
-{% if kube_version|version_compare('v1.11.1', '>=') %}
+{% if kube_version is version('v1.11.1', '>=') %}
 priorityClassName: system-node-critical
 {% endif %}
 containers:
@@ -44,7 +44,7 @@
 
 - name: Ensure minimum version for drain label selector if necessary
 assert:
-that: "kubectl_version.stdout.split(' ')[-1] | version_compare('v1.10.0', '>=')"
+that: "kubectl_version.stdout.split(' ')[-1] is version('v1.10.0', '>=')"
 when:
 - drain_nodes
 - needs_cordoning
@@ -12,7 +12,7 @@
 headers: "{{ vault_client_headers }}"
 status_code: "{{ vault_successful_http_codes | join(',') }}"
 register: vault_health_check
-until: vault_health_check|succeeded
+until: vault_health_check is succeeded
 retries: 10
 delay: "{{ retry_stagger | random + 3 }}"
 run_once: yes
@@ -20,7 +20,7 @@
 url: "http://localhost:{{ vault_port }}/"
 secret_shares: 1
 secret_threshold: 1
-until: "vault_temp_init|succeeded"
+until: "vault_temp_init is succeeded"
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
 register: vault_temp_init
@@ -4,7 +4,7 @@
 shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
 failed_when: false
 register: vault_temp_stop
-changed_when: vault_temp_stop|succeeded
+changed_when: vault_temp_stop is succeeded
 
 # Check if vault is reachable on the localhost
 - name: check_vault | Attempt to pull local https Vault health
@@ -7,7 +7,7 @@
 method: HEAD
 status_code: 200,429,501,503
 register: vault_leader_check
-until: "vault_leader_check|succeeded"
+until: "vault_leader_check is succeeded"
 retries: 10
 
 - name: find_leader | Set fact for current http leader
@@ -55,7 +55,7 @@
 no_log: true
 
 - debug: var=agents.content|from_json
-failed_when: not agents|success and not agents.content=='{}'
+failed_when: not agents is success and not agents.content=='{}'
 run_once: true
 
 - name: Check netchecker status
@@ -70,7 +70,7 @@
 when: not agents.content=='{}'
 
 - debug: var=result.content|from_json
-failed_when: not result|success
+failed_when: not result is success
 run_once: true
 when: not agents.content=='{}'
 delegate_to: "{{groups['kube-master'][0]}}"
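`success` is an alias of `succeeded`, so as a test it reads `result is success`. A sketch of the check-and-guard pattern used in these tasks (hypothetical endpoint URL):

    - uri:
        url: "http://localhost:31081/api/v1/agents/"   # hypothetical endpoint
        return_content: yes
      register: agents
      failed_when: not agents is success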