Add noqa and disable .ansible-lint global exclusions (#6410)
parent b680cdd0e4
commit e70f27dd79

74 changed files with 163 additions and 170 deletions
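Every file below follows the same pattern: instead of excluding ansible-lint rules globally, each exception is declared inline on the task that needs it. The rules dropped from the global skip_list in .ansible-lint (301, 302, 303, 305, 306, 404, 503) now appear only as per-task # noqa markers with a reason. A minimal sketch of that pattern, using a hypothetical task that is not part of this commit (the name and command are illustrative only):

# Hypothetical task, shown only to illustrate the inline-noqa style:
# rule 301 (roughly, "commands should not change things if nothing needs
# doing") is suppressed on this one read-only query instead of repo-wide.
- name: Example | Query cluster nodes # noqa 301
  command: "{{ bin_dir }}/kubectl get nodes -o json"
  register: cluster_nodes

The diff hunks that follow apply this treatment file by file.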
@@ -2,15 +2,8 @@
 parseable: true
 skip_list:
 # see https://docs.ansible.com/ansible-lint/rules/default_rules.html for a list of all default rules
-# The following rules throw errors.
-# These either still need to be corrected in the repository and the rules re-enabled or documented why they are skipped on purpose.
-- '301'
-- '302'
-- '303'
-- '305'
-- '306'
-- '404'
-- '503'
+# DO NOT add any other rules to this skip_list, instead use local `# noqa` with a comment explaining WHY it is necessary

 # These rules are intentionally skipped:
 #
@@ -1,6 +1,6 @@
 ---

-- name: Query Azure VMs
+- name: Query Azure VMs # noqa 301
 command: azure vm list-ip-address --json {{ azure_resource_group }}
 register: vm_list_cmd

@@ -1,14 +1,14 @@
 ---

-- name: Query Azure VMs IPs
+- name: Query Azure VMs IPs # noqa 301
 command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
 register: vm_ip_list_cmd

-- name: Query Azure VMs Roles
+- name: Query Azure VMs Roles # noqa 301
 command: az vm list -o json --resource-group {{ azure_resource_group }}
 register: vm_list_cmd

-- name: Query Azure Load Balancer Public IP
+- name: Query Azure Load Balancer Public IP # noqa 301
 command: az network public-ip show -o json -g {{ azure_resource_group }} -n kubernetes-api-pubip
 register: lb_pubip_cmd
@@ -69,7 +69,7 @@

 # Running systemd-machine-id-setup doesn't create a unique id for each node container on Debian,
 # handle manually
-- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave)
+- name: Re-create unique machine-id (as we may just get what comes in the docker image), needed by some CNIs for mac address seeding (notably weave) # noqa 301
 raw: |
 echo {{ item | hash('sha1') }} > /etc/machine-id.new
 mv -b /etc/machine-id.new /etc/machine-id

@@ -7,7 +7,7 @@
 register: glusterfs_ppa_added
 when: glusterfs_ppa_use

-- name: Ensure GlusterFS client will reinstall if the PPA was just added.
+- name: Ensure GlusterFS client will reinstall if the PPA was just added. # noqa 503
 apt:
 name: "{{ item }}"
 state: absent

@@ -7,7 +7,7 @@
 register: glusterfs_ppa_added
 when: glusterfs_ppa_use

-- name: Ensure GlusterFS will reinstall if the PPA was just added.
+- name: Ensure GlusterFS will reinstall if the PPA was just added. # noqa 503
 apt:
 name: "{{ item }}"
 state: absent
@@ -6,7 +6,7 @@
 - name: "Delete bootstrap Heketi."
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"deploy-heketi\""
 when: "heketi_resources.stdout|from_json|json_query('items[*]')|length > 0"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"deploy-heketi\" -o=json"
 register: "heketi_result"
 until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"

@@ -13,7 +13,7 @@
 - name: "Copy topology configuration into container."
 changed_when: false
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ initial_heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology." # noqa 503
 when: "render.changed"
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 register: "load_heketi"

@@ -18,7 +18,7 @@
 - name: "Provision database volume."
 command: "{{ bin_dir }}/kubectl exec {{ initial_heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} setup-openshift-heketi-storage"
 when: "heketi_database_volume_exists is undefined"
-- name: "Copy configuration from pod."
+- name: "Copy configuration from pod." # noqa 301
 become: true
 command: "{{ bin_dir }}/kubectl cp {{ initial_heketi_pod_name }}:/heketi-storage.json {{ kube_config_dir }}/heketi-storage-bootstrap.json"
 - name: "Get heketi volume ids."
@@ -10,10 +10,10 @@
 template:
 src: "topology.json.j2"
 dest: "{{ kube_config_dir }}/topology.json"
-- name: "Copy topology configuration into container."
+- name: "Copy topology configuration into container." # noqa 503
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl cp {{ kube_config_dir }}/topology.json {{ heketi_pod_name }}:/tmp/topology.json"
-- name: "Load heketi topology."
+- name: "Load heketi topology." # noqa 503
 when: "rendering.changed"
 command: "{{ bin_dir }}/kubectl exec {{ heketi_pod_name }} -- heketi-cli --user admin --secret {{ heketi_admin_key }} topology load --json=/tmp/topology.json"
 - name: "Get heketi topology."

@@ -22,7 +22,7 @@
 ignore_errors: true
 changed_when: false

-- name: "Remove volume groups."
+- name: "Remove volume groups." # noqa 301
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true

@@ -30,7 +30,7 @@
 with_items: "{{ volume_groups.stdout_lines }}"
 loop_control: { loop_var: "volume_group" }

-- name: "Remove physical volume from cluster disks."
+- name: "Remove physical volume from cluster disks." # noqa 301
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH / CentOS conservative path management
 become: true
@@ -1,43 +1,43 @@
 ---
-- name: "Remove storage class."
+- name: "Remove storage class." # noqa 301
 command: "{{ bin_dir }}/kubectl delete storageclass gluster"
 ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi." # noqa 301
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\""
 ignore_errors: true
-- name: "Tear down heketi."
+- name: "Tear down heketi." # noqa 301
 command: "{{ bin_dir }}/kubectl delete all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\""
 ignore_errors: true
 - name: "Tear down bootstrap."
 include_tasks: "../provision/tasks/bootstrap/tear-down.yml"
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-pod\" -o=json"
 register: "heketi_result"
 until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
 retries: 60
 delay: 5
-- name: "Ensure there is nothing left over."
+- name: "Ensure there is nothing left over." # noqa 301
 command: "{{ bin_dir }}/kubectl get all,service,jobs,deployment,secret --selector=\"glusterfs=heketi-deployment\" -o=json"
 register: "heketi_result"
 until: "heketi_result.stdout|from_json|json_query('items[*]')|length == 0"
 retries: 60
 delay: 5
-- name: "Tear down glusterfs."
+- name: "Tear down glusterfs." # noqa 301
 command: "{{ bin_dir }}/kubectl delete daemonset.extensions/glusterfs"
 ignore_errors: true
-- name: "Remove heketi storage service."
+- name: "Remove heketi storage service." # noqa 301
 command: "{{ bin_dir }}/kubectl delete service heketi-storage-endpoints"
 ignore_errors: true
-- name: "Remove heketi gluster role binding"
+- name: "Remove heketi gluster role binding" # noqa 301
 command: "{{ bin_dir }}/kubectl delete clusterrolebinding heketi-gluster-admin"
 ignore_errors: true
-- name: "Remove heketi config secret"
+- name: "Remove heketi config secret" # noqa 301
 command: "{{ bin_dir }}/kubectl delete secret heketi-config-secret"
 ignore_errors: true
-- name: "Remove heketi db backup"
+- name: "Remove heketi db backup" # noqa 301
 command: "{{ bin_dir }}/kubectl delete secret heketi-db-backup"
 ignore_errors: true
-- name: "Remove heketi service account"
+- name: "Remove heketi service account" # noqa 301
 command: "{{ bin_dir }}/kubectl delete serviceaccount heketi-service-account"
 ignore_errors: true
 - name: "Get secrets"
@@ -16,13 +16,13 @@
 src: get_cinder_pvs.sh
 dest: /tmp
 mode: u+rwx
-- name: Get PVs provisioned by in-tree cloud provider
+- name: Get PVs provisioned by in-tree cloud provider # noqa 301
 command: /tmp/get_cinder_pvs.sh
 register: pvs
 - name: Remove get_cinder_pvs.sh
 file:
 path: /tmp/get_cinder_pvs.sh
 state: absent
-- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation
+- name: Rewrite the "pv.kubernetes.io/provisioned-by" annotation # noqa 301
 command: "{{ bin_dir }}/kubectl annotate --overwrite pv {{ item }} pv.kubernetes.io/provisioned-by=cinder.csi.openstack.org"
 loop: "{{ pvs.stdout_lines | list }}"

@@ -4,7 +4,7 @@
 vars:
 download: "{{ download_defaults | combine(downloads.crictl) }}"

-- name: Install crictl config
+- name: Install crictl config # noqa 404
 template:
 src: ../templates/crictl.yaml.j2
 dest: /etc/crictl.yaml
@@ -34,7 +34,7 @@
 tags:
 - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
 shell:
 cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
 when:

@@ -4,7 +4,7 @@
 vars:
 download: "{{ download_defaults | combine(downloads.crictl) }}"

-- name: Install crictl config
+- name: Install crictl config # noqa 404
 template:
 src: ../templates/crictl.yaml.j2
 dest: /etc/crictl.yaml

@@ -21,7 +21,7 @@
 group: no
 delegate_to: "{{ inventory_hostname }}"

-- name: Get crictl completion
+- name: Get crictl completion # noqa 305
 shell: "{{ bin_dir }}/crictl completion"
 changed_when: False
 register: cri_completion
@@ -59,7 +59,7 @@
 - ansible_distribution == "CentOS"
 - ansible_distribution_major_version == "8"

-- name: Ensure latest version of libseccom installed
+- name: Ensure latest version of libseccom installed # noqa 303
 command: "yum update -y libseccomp"
 when:
 - ansible_distribution == "CentOS"

@@ -47,7 +47,7 @@
 tags:
 - facts

-- name: disable unified_cgroup_hierarchy in Fedora 31+
+- name: disable unified_cgroup_hierarchy in Fedora 31+ # noqa 305
 shell:
 cmd: grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"
 when:

@@ -28,13 +28,13 @@
 set_fact:
 docker_dns_search_domains: "{{ docker_dns_search_domains + searchdomains|default([]) }}"

-- name: check system nameservers
+- name: check system nameservers # noqa 306
 shell: grep "^nameserver" /etc/resolv.conf | sed -r 's/^nameserver\s*([^#\s]+)\s*(#.*)?/\1/'
 changed_when: False
 register: system_nameservers
 check_mode: no

-- name: check system search domains
+- name: check system search domains # noqa 306
 shell: grep "^search" /etc/resolv.conf | sed -r 's/^search\s*([^#]+)\s*(#.*)?/\1/'
 changed_when: False
 register: system_search_domains
@@ -11,7 +11,7 @@
 notify: restart docker
 when: http_proxy is defined or https_proxy is defined

-- name: get systemd version
+- name: get systemd version # noqa 306
 # noqa 303 - systemctl is called intentionally here
 shell: systemctl --version | head -n 1 | cut -d " " -f 2
 register: systemd_version

@@ -4,7 +4,7 @@
 # the template, just replace all instances of {{ `{{` }} with {{ and {{ '}}' }} with }}.
 # It will output something like the following:
 # nginx:1.15,gcr.io/google-containers/kube-proxy:v1.14.1,gcr.io/google-containers/kube-proxy@sha256:44af2833c6cbd9a7fc2e9d2f5244a39dfd2e31ad91bf9d4b7d810678db738ee9,gcr.io/google-containers/kube-apiserver:v1.14.1,etc...
-- name: check_pull_required | Generate a list of information about the images on a node
+- name: check_pull_required | Generate a list of information about the images on a node # noqa 305
 shell: "{{ image_info_command }}"
 no_log: true
 register: docker_images

@@ -63,7 +63,7 @@
 - pull_required or download_run_once
 - not image_is_cached

-- name: download_container | Save and compress image
+- name: download_container | Save and compress image # noqa 305
 shell: "{{ image_save_command_on_localhost if download_localhost else image_save_command }}"
 delegate_to: "{{ download_delegate }}"
 delegate_facts: no
@@ -103,7 +103,7 @@
 - pull_required
 - download_force_cache

-- name: download_container | Load image into docker
+- name: download_container | Load image into docker # noqa 305
 shell: "{{ image_load_command }}"
 register: container_load_status
 failed_when: container_load_status is failed

@@ -32,7 +32,7 @@
 - localhost
 - asserts

-- name: prep_download | On localhost, check if user has access to docker without using sudo
+- name: prep_download | On localhost, check if user has access to docker without using sudo # noqa 305
 shell: "{{ image_info_command_on_localhost }}"
 delegate_to: localhost
 connection: local

@@ -68,7 +68,7 @@
 - localhost
 - asserts

-- name: prep_download | Register docker images info
+- name: prep_download | Register docker images info # noqa 305
 shell: "{{ image_info_command }}"
 no_log: true
 register: docker_images

@@ -30,7 +30,7 @@
 mode: "0755"
 state: file

-- name: prep_kubeadm_images | Generate list of required images
+- name: prep_kubeadm_images | Generate list of required images # noqa 306
 shell: "{{ bin_dir }}/kubeadm config images list --config={{ kube_config_dir }}/kubeadm-images.yaml | grep -v coredns"
 register: kubeadm_images_raw
 run_once: true
@@ -1,5 +1,5 @@
 ---
-- name: Configure | Check if etcd cluster is healthy
+- name: Configure | Check if etcd cluster is healthy # noqa 306
 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
 register: etcd_cluster_is_healthy
 failed_when: false

@@ -16,7 +16,7 @@
 ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
 ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

-- name: Configure | Check if etcd-events cluster is healthy
+- name: Configure | Check if etcd-events cluster is healthy # noqa 306
 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
 register: etcd_events_cluster_is_healthy
 failed_when: false

@@ -73,7 +73,7 @@
 ignore_errors: "{{ etcd_events_cluster_is_healthy.rc == 0 }}"
 when: is_etcd_master and etcd_events_cluster_setup

-- name: Configure | Wait for etcd cluster to be healthy
+- name: Configure | Wait for etcd cluster to be healthy # noqa 306
 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
 register: etcd_cluster_is_healthy
 until: etcd_cluster_is_healthy.rc == 0

@@ -94,7 +94,7 @@
 ETCDCTL_CACERT: "{{ etcd_cert_dir }}/ca.pem"
 ETCDCTL_ENDPOINTS: "{{ etcd_access_addresses }}"

-- name: Configure | Wait for etcd-events cluster to be healthy
+- name: Configure | Wait for etcd-events cluster to be healthy # noqa 306
 shell: "{{ bin_dir }}/etcdctl endpoint --cluster status && {{ bin_dir }}/etcdctl endpoint --cluster health 2>&1 | grep -q -v 'Error: unhealthy cluster'"
 register: etcd_events_cluster_is_healthy
 until: etcd_events_cluster_is_healthy.rc == 0
@@ -139,7 +139,7 @@
 inventory_hostname in groups['k8s-cluster']) and
 sync_certs|default(false) and inventory_hostname not in groups['etcd']

-- name: Gen_certs | Copy certs on nodes
+- name: Gen_certs | Copy certs on nodes # noqa 306
 shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
 args:
 executable: /bin/bash

@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd-events cluster
+- name: Join Member | Add member to etcd-events cluster # noqa 301 305
 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_events_peer_url }}"
 register: member_add_result
 until: member_add_result.rc == 0

@@ -24,7 +24,7 @@
 {%- endif -%}
 {%- endfor -%}

-- name: Join Member | Ensure member is in etcd-events cluster
+- name: Join Member | Ensure member is in etcd-events cluster # noqa 306
 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_events_access_address }}"
 register: etcd_events_member_in_cluster
 changed_when: false
@@ -1,5 +1,5 @@
 ---
-- name: Join Member | Add member to etcd cluster
+- name: Join Member | Add member to etcd cluster # noqa 301 305
 shell: "{{ bin_dir }}/etcdctl member add {{ etcd_member_name }} --peer-urls={{ etcd_peer_url }}"
 register: member_add_result
 until: member_add_result.rc == 0

@@ -24,7 +24,7 @@
 {%- endif -%}
 {%- endfor -%}

-- name: Join Member | Ensure member is in etcd cluster
+- name: Join Member | Ensure member is in etcd cluster # noqa 306
 shell: "{{ bin_dir }}/etcdctl member list | grep -q {{ etcd_access_address }}"
 register: etcd_member_in_cluster
 changed_when: false

@@ -23,14 +23,14 @@
 remote_src: true
 register: etcd_ca_cert

-- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
+- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS) # noqa 503
 command: update-ca-certificates
 when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk", "Suse"]

-- name: Gen_certs | update ca-certificates (RedHat)
+- name: Gen_certs | update ca-certificates (RedHat) # noqa 503
 command: update-ca-trust extract
 when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

-- name: Gen_certs | update ca-certificates (ClearLinux)
+- name: Gen_certs | update ca-certificates (ClearLinux) # noqa 503
 command: clrtrust add "{{ ca_cert_path }}"
 when: etcd_ca_cert.changed and ansible_os_family == "ClearLinux"
@@ -32,7 +32,7 @@
 register: helmcert_master
 run_once: true

-- name: Gen_helm_tiller_certs | run cert generation script
+- name: Gen_helm_tiller_certs | run cert generation script # noqa 301
 run_once: yes
 delegate_to: "{{ groups['kube-master'][0] }}"
 command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"

@@ -57,7 +57,7 @@
 with_items:
 - "{{ helm_client_certs }}"

-- name: Gen_helm_tiller_certs | Gather helm client certs
+- name: Gen_helm_tiller_certs | Gather helm client certs # noqa 306
 # noqa 303 - tar is called intentionally here, but maybe this should be done with the slurp module
 shell: "tar cfz - -C {{ helm_home_dir }} {{ helm_client_certs|join(' ') }} | base64 --wrap=0"
 args:

@@ -85,7 +85,7 @@
 mode: "0600"
 when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]

-- name: Gen_helm_tiller_certs | Unpack helm certs on masters
+- name: Gen_helm_tiller_certs | Unpack helm certs on masters # noqa 306
 shell: "base64 -d < {{ helm_cert_tempfile.path }} | tar xz -C {{ helm_home_dir }}"
 no_log: true
 changed_when: false
@@ -52,7 +52,7 @@
 - helm_version is version('v3.0.0', '<')

 # FIXME: https://github.com/helm/helm/issues/6374
-- name: Helm | Install/upgrade helm
+- name: Helm | Install/upgrade helm # noqa 306
 shell: >
 {{ bin_dir }}/helm init --tiller-namespace={{ tiller_namespace }}
 {% if helm_skip_refresh %} --skip-refresh{% endif %}

@@ -78,7 +78,7 @@
 environment: "{{ proxy_env }}"

 # FIXME: https://github.com/helm/helm/issues/4063
-- name: Helm | Force apply tiller overrides if necessary
+- name: Helm | Force apply tiller overrides if necessary # noqa 306
 shell: >
 {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ tiller_namespace }}
 {% if helm_skip_refresh %} --skip-refresh{% endif %}

@@ -108,7 +108,7 @@
 - helm_version is version('v3.0.0', '>=')
 - helm_stable_repo_url is defined

-- name: Make sure bash_completion.d folder exists
+- name: Make sure bash_completion.d folder exists # noqa 503
 file:
 name: "/etc/bash_completion.d/"
 state: directory

@@ -116,7 +116,7 @@
 - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))
 - ansible_os_family in ["ClearLinux"]

-- name: Helm | Set up bash completion
+- name: Helm | Set up bash completion # noqa 503
 shell: "umask 022 && {{ bin_dir }}/helm completion bash >/etc/bash_completion.d/helm.sh"
 when:
 - ((helm_container is defined and helm_container.changed) or (helm_task_result is defined and helm_task_result.changed))
@@ -1,5 +1,5 @@
 ---
-- name: "calico upgrade complete"
+- name: "calico upgrade complete" # noqa 305
 shell: "{{ bin_dir }}/calico-upgrade complete --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
 when:
 - inventory_hostname == groups['kube-master'][0]

@@ -1,5 +1,5 @@
 ---
-- name: Rotate Tokens | Get default token name
+- name: Rotate Tokens | Get default token name # noqa 306
 shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
 register: default_token
 changed_when: false

@@ -29,7 +29,7 @@

 # FIXME(mattymo): Exclude built in secrets that were automatically rotated,
 # instead of filtering manually
-- name: Rotate Tokens | Get all serviceaccount tokens to expire
+- name: Rotate Tokens | Get all serviceaccount tokens to expire # noqa 306
 shell: >-
 {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
 -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
@@ -48,7 +48,7 @@
 timeout: 180

 # NOTE(mattymo): Please forgive this workaround
-- name: Generate admin kubeconfig with external api endpoint
+- name: Generate admin kubeconfig with external api endpoint # noqa 302
 shell: >-
 mkdir -p {{ kube_config_dir }}/external_kubeconfig &&
 {{ bin_dir }}/kubeadm

@@ -22,7 +22,7 @@
 delegate_to: "{{ groups['kube-master'][0] }}"
 run_once: true

-- name: Calculate kubeadm CA cert hash
+- name: Calculate kubeadm CA cert hash # noqa 306
 shell: openssl x509 -pubkey -in {{ kube_cert_dir }}/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
 register: kubeadm_ca_hash
 when:

@@ -107,7 +107,7 @@

 # FIXME(mattymo): Need to point to localhost, otherwise masters will all point
 # incorrectly to first master, creating SPoF.
-- name: Update server field in kube-proxy kubeconfig
+- name: Update server field in kube-proxy kubeconfig # noqa 306
 shell: >-
 {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
 | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'

@@ -131,7 +131,7 @@
 group: root
 mode: "0644"

-- name: Restart all kube-proxy pods to ensure that they load the new configmap
+- name: Restart all kube-proxy pods to ensure that they load the new configmap # noqa 305
 shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
 run_once: true
 delegate_to: "{{ groups['kube-master']|first }}"
@@ -157,7 +157,7 @@

 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
-- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
+- name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services # noqa 305
 shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
 run_once: true
 delegate_to: "{{ groups['kube-master']|first }}"

@@ -47,7 +47,7 @@
 when:
 - old_apiserver_cert.stat.exists

-- name: kubeadm | Forcefully delete old static pods
+- name: kubeadm | Forcefully delete old static pods # noqa 306
 shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
 with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
 when:

@@ -8,7 +8,7 @@
 register: kube_apiserver_manifest_replaced
 when: etcd_secret_changed|default(false)

-- name: "Pre-upgrade | Delete master containers forcefully"
+- name: "Pre-upgrade | Delete master containers forcefully" # noqa 306 503
 shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
 with_items:
 - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
@@ -45,7 +45,7 @@
 tags:
 - kube-proxy

-- name: Verify if br_netfilter module exists
+- name: Verify if br_netfilter module exists # noqa 305
 shell: "modinfo br_netfilter"
 environment:
 PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management

@@ -1,5 +1,5 @@
 ---
-- name: "Pre-upgrade | check if kubelet container exists"
+- name: "Pre-upgrade | check if kubelet container exists" # noqa 306
 shell: >-
 {% if container_manager in ['crio', 'docker'] %}
 docker ps -af name=kubelet | grep kubelet

@@ -29,7 +29,7 @@
 - Preinstall | reload kubelet
 when: is_fedora_coreos

-- name: Preinstall | reload NetworkManager
+- name: Preinstall | reload NetworkManager # noqa 303
 command: systemctl restart NetworkManager.service
 when: is_fedora_coreos
@@ -158,7 +158,7 @@
 when:
 - kube_network_plugin == 'calico'

-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version" # noqa 306
 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
 register: calico_version_on_server
 run_once: yes

@@ -24,14 +24,14 @@
 set_fact:
 is_fedora_coreos: "{{ ostree.stat.exists and os_variant_coreos is not changed }}"

-- name: check resolvconf
+- name: check resolvconf # noqa 305
 shell: which resolvconf
 register: resolvconf
 failed_when: false
 changed_when: false
 check_mode: no

-- name: check systemd-resolved
+- name: check systemd-resolved # noqa 303
 command: systemctl is-active systemd-resolved
 register: systemd_resolved_enabled
 failed_when: false

@@ -1,5 +1,5 @@
 ---
-- name: Update package management cache (zypper) - SUSE
+- name: Update package management cache (zypper) - SUSE # noqa 305
 shell: zypper -n --gpg-auto-import-keys ref
 register: make_cache_output
 until: make_cache_output is succeeded
@@ -20,12 +20,12 @@
 changed_when: False
 register: fs_type

-- name: run growpart
+- name: run growpart # noqa 503
 command: growpart /dev/sda 1
 when: growpart_needed.changed
 environment:
 LC_ALL: C

-- name: run xfs_growfs
+- name: run xfs_growfs # noqa 503
 command: xfs_growfs /dev/sda1
 when: growpart_needed.changed and 'XFS' in fs_type.stdout

@@ -34,7 +34,7 @@
 delegate_to: "{{ groups['kube-master'][0] }}"
 when: gen_tokens|default(false)

-- name: Gen_tokens | Get list of tokens from first master
+- name: Gen_tokens | Get list of tokens from first master # noqa 305
 shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
 register: tokens_list
 check_mode: no

@@ -42,7 +42,7 @@
 run_once: true
 when: sync_tokens|default(false)

-- name: Gen_tokens | Gather tokens
+- name: Gen_tokens | Gather tokens # noqa 306
 shell: "tar cfz - {{ tokens_list.stdout_lines | join(' ') }} | base64 --wrap=0"
 args:
 warn: false
@@ -52,7 +52,7 @@
 run_once: true
 when: sync_tokens|default(false)

-- name: Gen_tokens | Copy tokens on masters
+- name: Gen_tokens | Copy tokens on masters # noqa 306
 shell: "echo '{{ tokens_data.stdout|quote }}' | base64 -d | tar xz -C /"
 when:
 - inventory_hostname in groups['kube-master']

@@ -2,7 +2,7 @@
 - name: Calico-rr | Pre-upgrade tasks
 include_tasks: pre.yml

-- name: Calico-rr | Fetch current node object
+- name: Calico-rr | Fetch current node object # noqa 301
 command: "{{ bin_dir }}/calicoctl.sh get node {{ inventory_hostname }} -ojson"
 register: calico_rr_node
 until: calico_rr_node is succeeded

@@ -15,12 +15,12 @@
 {{ calico_rr_node.stdout | from_json | combine({ 'spec': { 'bgp':
 { 'routeReflectorClusterID': cluster_id }}}, recursive=True) }}

-- name: Calico-rr | Configure route reflector
+- name: Calico-rr | Configure route reflector # noqa 301 305
 shell: "{{ bin_dir }}/calicoctl.sh replace -f-"
 args:
 stdin: "{{ calico_rr_node_patched | to_json }}"

-- name: Calico-rr | Set label for route reflector
+- name: Calico-rr | Set label for route reflector # noqa 301
 command: >-
 {{ bin_dir }}/calicoctl.sh label node {{ inventory_hostname }}
 'i-am-a-route-reflector=true' --overwrite
@@ -37,7 +37,7 @@
 when:
 - "calico_vxlan_mode in ['Always', 'CrossSubnet']"

-- name: "Get current version of calico cluster version"
+- name: "Get current version of calico cluster version" # noqa 306
 shell: "{{ bin_dir }}/calicoctl.sh version | grep 'Cluster Version:' | awk '{ print $3}'"
 register: calico_version_on_server
 run_once: yes

@@ -6,7 +6,7 @@
 mode: 0755
 remote_src: yes

-- name: Calico | Check if host has NetworkManager
+- name: Calico | Check if host has NetworkManager # noqa 303
 command: systemctl show NetworkManager
 register: nm_check
 failed_when: false

@@ -84,7 +84,7 @@
 run_once: true
 when: calico_datastore == "etcd"

-- name: Calico | Check if calico network pool has already been configured
+- name: Calico | Check if calico network pool has already been configured # noqa 306
 shell: >
 {{ bin_dir }}/calicoctl.sh get ippool | grep -w "{{ calico_pool_cidr | default(kube_pods_subnet) }}" | wc -l
 register: calico_conf
@@ -131,7 +131,7 @@
 loop_control:
 label: "{{ item.item.file }}"

-- name: Calico | Configure calico network pool (version < v3.3.0)
+- name: Calico | Configure calico network pool (version < v3.3.0) # noqa 306
 shell: >
 echo "
 { "kind": "IPPool",

@@ -149,7 +149,7 @@
 - 'calico_conf.stdout == "0"'
 - calico_version is version("v3.3.0", "<")

-- name: Calico | Configure calico network pool (version >= v3.3.0)
+- name: Calico | Configure calico network pool (version >= v3.3.0) # noqa 306
 shell: >
 echo "
 { "kind": "IPPool",

@@ -176,7 +176,7 @@
 - inventory_hostname in groups['k8s-cluster']
 run_once: yes

-- name: Calico | Set global as_num
+- name: Calico | Set global as_num # noqa 306
 shell: >
 echo '
 { "kind": "BGPConfiguration",
@@ -192,7 +192,7 @@
 when:
 - inventory_hostname == groups['kube-master'][0]

-- name: Calico | Configure peering with router(s) at global scope
+- name: Calico | Configure peering with router(s) at global scope # noqa 306
 shell: >
 echo '{
 "apiVersion": "projectcalico.org/v3",

@@ -214,7 +214,7 @@
 - inventory_hostname == groups['kube-master'][0]
 - peer_with_router|default(false)

-- name: Calico | Configure peering with route reflectors at global scope
+- name: Calico | Configure peering with route reflectors at global scope # noqa 306
 shell: |
 echo '{
 "apiVersion": "projectcalico.org/v3",

@@ -236,7 +236,7 @@
 - inventory_hostname == groups['kube-master'][0]
 - peer_with_calico_rr|default(false)

-- name: Calico | Configure route reflectors to peer with each other
+- name: Calico | Configure route reflectors to peer with each other # noqa 306
 shell: >
 echo '{
 "apiVersion": "projectcalico.org/v3",
@@ -309,7 +309,7 @@
 - inventory_hostname not in groups['kube-master']
 - calico_datastore == "kdd"

-- name: Calico | Configure node asNumber for per node peering
+- name: Calico | Configure node asNumber for per node peering # noqa 306
 shell: >
 echo '{
 "apiVersion": "projectcalico.org/v3",

@@ -333,7 +333,7 @@
 - local_as is defined
 - groups['calico-rr'] | default([]) | length == 0

-- name: Calico | Configure peering with router(s) at node scope
+- name: Calico | Configure peering with router(s) at node scope # noqa 306
 shell: >
 echo '{
 "apiVersion": "projectcalico.org/v3",

@@ -1,5 +1,5 @@
 ---
-- name: Calico | Get kubelet hostname
+- name: Calico | Get kubelet hostname # noqa 306
 shell: >-
 {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
 | egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
@@ -8,11 +8,11 @@
command: ip link del dummy0
when: dummy0.stat.exists

-- name: reset | get remaining routes set by bird
+- name: reset | get remaining routes set by bird # noqa 301
command: ip route show proto bird
register: bird_routes

-- name: reset | remove remaining routes set by bird
+- name: reset | remove remaining routes set by bird # noqa 301
command: "ip route del {{ bird_route }} proto bird"
with_items: "{{ bird_routes.stdout_lines }}"
loop_control:
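Rule 301 ("commands should not change things if nothing needs doing") is silenced with `# noqa 301` where the command really does mutate state on every run. For a purely read-only command the rule can instead be satisfied by declaring that it never changes anything; a sketch only, with an illustrative task name:

- name: Example | read bird routes without reporting a change  # hypothetical task, not in this diff
  command: ip route show proto bird
  register: bird_routes
  changed_when: false  # a read-only query never reports "changed"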
@@ -16,11 +16,11 @@
- "etcdv2"
- "etcdv3"

-- name: "Tests data migration (dry-run)"
+- name: "Tests data migration (dry-run)" # noqa 301 305
shell: "{{ bin_dir }}/calico-upgrade dry-run --output-dir=/tmp --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml"
register: calico_upgrade_test_data
failed_when: '"Successfully" not in calico_upgrade_test_data.stdout'

-- name: "If test migration is success continue with calico data real migration"
+- name: "If test migration is success continue with calico data real migration" # noqa 301 305
shell: "{{ bin_dir }}/calico-upgrade start --no-prompts --apiconfigv1 /etc/calico/etcdv2.yml --apiconfigv3 /etc/calico/etcdv3.yml --output-dir=/tmp/calico_upgrade"
register: calico_upgrade_migration_data
@@ -21,7 +21,7 @@
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]

-- name: reset | Copy contiv temporary cleanup script
+- name: reset | Copy contiv temporary cleanup script # noqa 404
copy:
src: ../files/contiv-cleanup.sh # Not in role_path so we must trick...
dest: /opt/cni/bin/cleanup

@@ -31,7 +31,7 @@
when:
- contiv_kubectl.stat.exists

-- name: reset | Lay down contiv cleanup template
+- name: reset | Lay down contiv cleanup template # noqa 404
template:
src: ../templates/contiv-cleanup.yml.j2 # Not in role_path so we must trick...
dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
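Rule 404 ("doesn't need a relative path in role") is silenced on purpose here: as the inline comments say, these files live outside the role, so the relative `../files/` and `../templates/` paths are required. The rule-conforming layout would ship the file inside the role and reference it by bare name; a sketch under that assumption, with an illustrative file mode:

- name: Example | copy a cleanup script shipped inside the role  # hypothetical layout, not in this diff
  copy:
    src: contiv-cleanup.sh  # resolved from the role's files/ directory when no path is given
    dest: /opt/cni/bin/cleanup
    mode: 0755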
@@ -1,5 +1,5 @@
---
-- name: Kube-OVN | Label ovn-db node
+- name: Kube-OVN | Label ovn-db node # noqa 305
shell: >-
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
when:
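Rule 305 ("use shell only when shell functionality is required") is silenced where `shell:` is kept even though the line uses no pipes, redirection, or globbing. The rule-conforming form simply switches to the command module; an illustrative sketch of the same label operation, not part of this commit:

- name: Example | label the ovn-db node with the command module  # hypothetical rewrite, not in this diff
  command: >-
    {{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master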
@@ -1,5 +1,5 @@
---
-- name: Macvlan | Retrieve Pod Cidr
+- name: Macvlan | Retrieve Pod Cidr # noqa 301
command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
register: node_pod_cidr_cmd
delegate_to: "{{ groups['kube-master'][0] }}"

@@ -8,7 +8,7 @@
set_fact:
node_pod_cidr={{ node_pod_cidr_cmd.stdout }}

-- name: Macvlan | Retrieve default gateway network interface
+- name: Macvlan | Retrieve default gateway network interface # noqa 301
become: false
raw: ip -4 route list 0/0 | sed 's/.*dev \([[:alnum:]]*\).*/\1/'
register: node_default_gateway_interface_cmd

@@ -1,5 +1,5 @@
---
-- name: Get etcd endpoint health
+- name: Get etcd endpoint health # noqa 305
shell: "{{ bin_dir }}/etcdctl endpoint health"
register: etcd_endpoint_health
ignore_errors: true

@@ -57,7 +57,7 @@
- groups['broken_etcd']
- "item.rc != 0 and not 'No such file or directory' in item.stderr"

-- name: Get etcd cluster members
+- name: Get etcd cluster members # noqa 305
shell: "{{ bin_dir }}/etcdctl member list"
register: member_list
changed_when: false

@@ -73,7 +73,7 @@
- not healthy
- has_quorum

-- name: Remove broken cluster members
+- name: Remove broken cluster members # noqa 305
shell: "{{ bin_dir }}/etcdctl member remove {{ item[1].replace(' ','').split(',')[0] }}"
environment:
ETCDCTL_API: 3
@@ -1,5 +1,5 @@
---
-- name: Save etcd snapshot
+- name: Save etcd snapshot # noqa 305
shell: "{{ bin_dir }}/etcdctl snapshot save /tmp/snapshot.db"
environment:
- ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -25,7 +25,7 @@
path: "{{ etcd_data_dir }}"
state: absent

-- name: Restore etcd snapshot
+- name: Restore etcd snapshot # noqa 301 305
shell: "{{ bin_dir }}/etcdctl snapshot restore /tmp/snapshot.db --name {{ etcd_member_name }} --initial-cluster {{ etcd_member_name }}={{ etcd_peer_url }} --initial-cluster-token k8s_etcd --initial-advertise-peer-urls {{ etcd_peer_url }} --data-dir {{ etcd_data_dir }}"
environment:
- ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"

@@ -1,5 +1,5 @@
---
-- name: Wait for apiserver
+- name: Wait for apiserver # noqa 305
shell: "{{ bin_dir }}/kubectl get nodes"
environment:
- KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"

@@ -10,7 +10,7 @@
changed_when: false
when: groups['broken_kube-master']

-- name: Delete broken kube-master nodes from cluster
+- name: Delete broken kube-master nodes from cluster # noqa 305
shell: "{{ bin_dir }}/kubectl delete node {{ item }}"
environment:
- KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"

@@ -1,5 +1,5 @@
---
-- name: Delete node
+- name: Delete node # noqa 301
command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube-master']|first }}"
ignore_errors: yes

@@ -1,5 +1,5 @@
---
-- name: cordon-node | Mark all nodes as unschedulable before drain
+- name: cordon-node | Mark all nodes as unschedulable before drain # noqa 301
command: >-
{{ bin_dir }}/kubectl cordon {{ hostvars[item]['kube_override_hostname']|default(item) }}
with_items:

@@ -9,7 +9,7 @@
run_once: true
ignore_errors: yes

-- name: remove-node | Drain node except daemonsets resource
+- name: remove-node | Drain node except daemonsets resource # noqa 301
command: >-
{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
--force

@@ -34,7 +34,7 @@
delegate_to: "{{ groups['etcd']|first }}"
when: inventory_hostname in groups['etcd']

-- name: Remove etcd member from cluster
+- name: Remove etcd member from cluster # noqa 305
shell: "{{ bin_dir }}/etcdctl member remove {{ etcd_member_id.stdout }}"
register: etcd_member_in_cluster
changed_when: false
@@ -41,12 +41,12 @@
tags:
- docker

-- name: reset | systemctl daemon-reload
+- name: reset | systemctl daemon-reload # noqa 503
systemd:
daemon_reload: true
when: services_removed.changed or docker_dropins_removed.changed

-- name: reset | remove all containers
+- name: reset | remove all containers # noqa 306
shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
register: remove_all_containers
retries: 4
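Rule 503 ("tasks that run when changed should likely be handlers") is silenced because the reset flow needs the daemon-reload to run inline and in order, not deferred to the end of the play. The handler-based shape the rule points at would look roughly like this; the task and handler names are illustrative, not part of this commit:

- name: Example | remove docker drop-ins  # hypothetical triggering task, not in this diff
  file:
    path: /etc/systemd/system/docker.service.d
    state: absent
  notify: Reload systemd

# handlers/main.yml (illustrative)
- name: Reload systemd
  systemd:
    daemon_reload: true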
@@ -56,7 +56,7 @@
tags:
- docker

-- name: reset | restart docker if needed
+- name: reset | restart docker if needed # noqa 503
service:
name: docker
state: restarted

@@ -64,7 +64,7 @@
tags:
- docker

-- name: reset | stop all cri containers
+- name: reset | stop all cri containers # noqa 306
shell: "crictl ps -aq | xargs -r crictl -t 60s stop"
register: remove_all_cri_containers
retries: 5

@@ -75,7 +75,7 @@
- containerd
when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri containers
+- name: reset | remove all cri containers # noqa 306
shell: "crictl ps -aq | xargs -r crictl -t 60s rm"
register: remove_all_cri_containers
retries: 5

@@ -86,7 +86,7 @@
- containerd
when: container_manager in ["crio", "containerd"] and deploy_container_engine|default(true)

-- name: reset | stop all cri pods
+- name: reset | stop all cri pods # noqa 306
shell: "crictl pods -q | xargs -r crictl -t 60s stopp"
register: remove_all_cri_containers
retries: 5

@@ -97,7 +97,7 @@
- containerd
when: container_manager in ["crio", "containerd"]

-- name: reset | remove all cri pods
+- name: reset | remove all cri pods # noqa 306
shell: "crictl pods -q | xargs -r crictl -t 60s rmp"
register: remove_all_cri_containers
retries: 5

@@ -130,7 +130,7 @@
tags:
- services

-- name: reset | gather mounted kubelet dirs
+- name: reset | gather mounted kubelet dirs # noqa 306 301
shell: mount | grep /var/lib/kubelet/ | awk '{print $3}' | tac
args:
warn: false

@@ -139,7 +139,7 @@
tags:
- mounts

-- name: reset | unmount kubelet dirs
+- name: reset | unmount kubelet dirs # noqa 301
command: umount -f {{ item }}
with_items: "{{ mounted_dirs.stdout_lines }}"
register: umount_dir

@@ -161,7 +161,7 @@
tags:
- iptables

-- name: Clear IPVS virtual server table
+- name: Clear IPVS virtual server table # noqa 305
shell: "ipvsadm -C"
when:
- kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster']
@@ -112,7 +112,7 @@
{%- endfor %}
when: "'etcd' in groups"

-- name: Storing commands output
+- name: Storing commands output # noqa 306
shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
failed_when: false
with_items: "{{ commands }}"

@@ -34,7 +34,7 @@
when:
- item.value.converted|bool

-- name: Resize images
+- name: Resize images # noqa 301
command: qemu-img resize {{ images_dir }}/{{ item.key }}.qcow2 +8G
with_dict:
- "{{ images }}"

@@ -45,15 +45,15 @@
src: Dockerfile
dest: "{{ images_dir }}/Dockerfile"

-- name: Create docker images for each OS
+- name: Create docker images for each OS # noqa 301
command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
with_dict:
- "{{ images }}"

-- name: docker login
+- name: docker login # noqa 301
command: docker login -u="{{ docker_user }}" -p="{{ docker_password }}" "{{ docker_host }}"

-- name: docker push image
+- name: docker push image # noqa 301
command: docker push {{ registry }}/vm-{{ item.key }}:latest
with_dict:
- "{{ images }}"
@@ -18,7 +18,7 @@
instance_tags: "{{ aws.tags }}"
register: ec2

-- name: Template the inventory
+- name: Template the inventory # noqa 404
template:
src: ../templates/inventory-aws.j2
dest: "{{ inventory_path }}"

@@ -86,7 +86,7 @@
msg: "{{ droplets }}, {{ inventory_path }}"
when: state == 'present'

-- name: Template the inventory
+- name: Template the inventory # noqa 404
template:
src: ../templates/inventory-do.j2
dest: "{{ inventory_path }}"

@@ -49,7 +49,7 @@
add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
with_items: '{{ gce.instance_data }}'

-- name: Template the inventory
+- name: Template the inventory # noqa 404
template:
src: ../templates/inventory-gce.j2
dest: "{{ inventory_path }}"

@@ -60,7 +60,7 @@
state: directory
when: mode in ['scale', 'separate-scale', 'ha-scale']

-- name: Template fake hosts group vars
+- name: Template fake hosts group vars # noqa 404
template:
src: ../templates/fake_hosts.yml.j2
dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"

@@ -29,7 +29,7 @@
loop_control:
index_var: vm_id

-- name: Wait for vms to have ipaddress assigned
+- name: Wait for vms to have ipaddress assigned # noqa 301 306
shell: "kubectl get vmis -n {{ test_name }} instance-{{ vm_id }} -o json | jq '.status.interfaces[].ipAddress' | tr -d '\"'"
register: vm_ips
loop: "{{ range(1, vm_count|int + 1, 1) | list }}"

@@ -16,7 +16,7 @@
state: absent
name: "{{ test_name }}"

-- name: Wait for namespace {{ test_name }} to be fully deleted
+- name: Wait for namespace {{ test_name }} to be fully deleted # noqa 305
shell: kubectl get ns {{ test_name }}
register: delete_namespace
failed_when:
|
@ -7,7 +7,7 @@
|
||||||
expire_days: 2
|
expire_days: 2
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- name: Generate uniq bucket name prefix
|
- name: Generate uniq bucket name prefix # noqa 301
|
||||||
raw: date +%Y%m%d
|
raw: date +%Y%m%d
|
||||||
register: out
|
register: out
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@
|
||||||
no_log: True
|
no_log: True
|
||||||
failed_when: false
|
failed_when: false
|
||||||
|
|
||||||
- name: Apply the lifecycle rules
|
- name: Apply the lifecycle rules # noqa 301
|
||||||
command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
|
command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
|
||||||
environment:
|
environment:
|
||||||
BOTO_CONFIG: "{{ dir }}/.boto"
|
BOTO_CONFIG: "{{ dir }}/.boto"
|
||||||
|
|
|
@ -15,7 +15,7 @@
|
||||||
- import_role:
|
- import_role:
|
||||||
name: cluster-dump
|
name: cluster-dump
|
||||||
|
|
||||||
- name: Check kubectl output
|
- name: Check kubectl output # noqa 301 305
|
||||||
shell: "{{ bin_dir }}/kubectl get nodes"
|
shell: "{{ bin_dir }}/kubectl get nodes"
|
||||||
register: get_nodes
|
register: get_nodes
|
||||||
no_log: true
|
no_log: true
|
||||||
|
@ -23,7 +23,7 @@
|
||||||
- debug:
|
- debug:
|
||||||
msg: "{{ get_nodes.stdout.split('\n') }}"
|
msg: "{{ get_nodes.stdout.split('\n') }}"
|
||||||
|
|
||||||
- name: Check that all nodes are running and ready
|
- name: Check that all nodes are running and ready # noqa 301 305
|
||||||
shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
|
shell: "{{ bin_dir }}/kubectl get nodes --no-headers -o yaml"
|
||||||
register: get_nodes_yaml
|
register: get_nodes_yaml
|
||||||
until:
|
until:
|
||||||
|
|
|
@ -15,7 +15,7 @@
|
||||||
- import_role:
|
- import_role:
|
||||||
name: cluster-dump
|
name: cluster-dump
|
||||||
|
|
||||||
- name: Check kubectl output
|
- name: Check kubectl output # noqa 301 305
|
||||||
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
|
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
|
||||||
register: get_pods
|
register: get_pods
|
||||||
no_log: true
|
no_log: true
|
||||||
|
@ -23,7 +23,7 @@
|
||||||
- debug:
|
- debug:
|
||||||
msg: "{{ get_pods.stdout.split('\n') }}"
|
msg: "{{ get_pods.stdout.split('\n') }}"
|
||||||
|
|
||||||
- name: Check that all pods are running and ready
|
- name: Check that all pods are running and ready # noqa 301 305
|
||||||
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
|
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
|
||||||
register: run_pods_log
|
register: run_pods_log
|
||||||
until:
|
until:
|
||||||
|
@ -36,7 +36,7 @@
|
||||||
failed_when: false
|
failed_when: false
|
||||||
no_log: true
|
no_log: true
|
||||||
|
|
||||||
- name: Check kubectl output
|
- name: Check kubectl output # noqa 301 305
|
||||||
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
|
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
|
||||||
register: get_pods
|
register: get_pods
|
||||||
no_log: true
|
no_log: true
|
||||||
|
|
|
@@ -15,10 +15,10 @@
bin_dir: "/usr/local/bin"
when: not ansible_os_family in ["CoreOS", "Coreos", "Container Linux by CoreOS", "Flatcar", "Flatcar Container Linux by Kinvolk"]

-- name: Create test namespace
+- name: Create test namespace # noqa 301 305
shell: "{{ bin_dir }}/kubectl create namespace test"

-- name: Run 2 busybox pods in test ns
+- name: Run 2 busybox pods in test ns # noqa 301 305
shell: "{{ bin_dir }}/kubectl run {{ item }} --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --command -- tail -f /dev/null"
loop:
- busybox1

@@ -27,7 +27,7 @@
- import_role:
name: cluster-dump

-- name: Check that all pods are running and ready
+- name: Check that all pods are running and ready # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
register: run_pods_log
until:

@@ -40,7 +40,7 @@
failed_when: false
no_log: true

-- name: Get pod names
+- name: Get pod names # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
register: pods
no_log: true

@@ -49,19 +49,19 @@
msg: "{{ pods.stdout.split('\n') }}"
failed_when: not run_pods_log is success

-- name: Get hostnet pods
+- name: Get hostnet pods # noqa 301
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: hostnet_pods
no_log: true

-- name: Get running pods
+- name: Get running pods # noqa 301
command: "{{ bin_dir }}/kubectl get pods -n test -o
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
register: running_pods
no_log: true

-- name: Check kubectl output
+- name: Check kubectl output # noqa 301 305
shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
register: get_pods
no_log: true

@@ -89,7 +89,7 @@
- item in pods_running
with_items: "{{ pod_ips }}"

-- name: Ping between pods is working
+- name: Ping between pods is working # noqa 305
shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
when:
- not item[0] in pods_hostnet

@@ -98,7 +98,7 @@
- "{{ pod_names }}"
- "{{ pod_ips }}"

-- name: Ping between hostnet pods is working
+- name: Ping between hostnet pods is working # noqa 305
shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
when:
- item[0] in pods_hostnet
|
||||||
---
|
---
|
||||||
- hosts: kube-node
|
- hosts: kube-node
|
||||||
tasks:
|
tasks:
|
||||||
- name: Test tunl0 routes
|
- name: Test tunl0 routes # noqa 306
|
||||||
shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
|
shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
|
||||||
when:
|
when:
|
||||||
- (ipip|default(true) or cloud_provider is defined)
|
- (ipip|default(true) or cloud_provider is defined)
|
||||||
|
@ -14,7 +14,7 @@
|
||||||
netchecker_port: 31081
|
netchecker_port: 31081
|
||||||
|
|
||||||
tasks:
|
tasks:
|
||||||
- name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282)
|
- name: Flannel | Disable tx and rx offloading on VXLAN interfaces (see https://github.com/coreos/flannel/pull/1282) # noqa 305
|
||||||
shell: "ethtool --offload flannel.1 rx off tx off"
|
shell: "ethtool --offload flannel.1 rx off tx off"
|
||||||
ignore_errors: true
|
ignore_errors: true
|
||||||
when:
|
when:
|
||||||
|
@ -33,7 +33,7 @@
|
||||||
- import_role:
|
- import_role:
|
||||||
name: cluster-dump
|
name: cluster-dump
|
||||||
|
|
||||||
- name: Wait for netchecker server
|
- name: Wait for netchecker server # noqa 306
|
||||||
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
|
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
|
||||||
register: ncs_pod
|
register: ncs_pod
|
||||||
until: ncs_pod.stdout.find('Running') != -1
|
until: ncs_pod.stdout.find('Running') != -1
|
||||||
|
@ -41,7 +41,7 @@
|
||||||
delay: 10
|
delay: 10
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube-master'][0]
|
||||||
|
|
||||||
- name: Wait for netchecker agents
|
- name: Wait for netchecker agents # noqa 306
|
||||||
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
|
shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
|
||||||
register: nca_pod
|
register: nca_pod
|
||||||
until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
|
until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
|
||||||
|
@ -214,7 +214,7 @@
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube-master'][0]
|
||||||
- kube_network_plugin_multus|default(false)|bool
|
- kube_network_plugin_multus|default(false)|bool
|
||||||
|
|
||||||
- name: Check secondary macvlan interface
|
- name: Check secondary macvlan interface # noqa 305
|
||||||
shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
|
shell: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
|
||||||
register: output
|
register: output
|
||||||
until: output.rc == 0
|
until: output.rc == 0
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
---
|
---
|
||||||
- name: Generate dump folder
|
- name: Generate dump folder # noqa 305
|
||||||
shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
|
shell: "{{ bin_dir }}/kubectl cluster-info dump --all-namespaces --output-directory /tmp/cluster-dump"
|
||||||
no_log: true
|
no_log: true
|
||||||
when: inventory_hostname in groups['kube-master']
|
when: inventory_hostname in groups['kube-master']
|
||||||