# c12s-kubespray/tests/testcases/040_check-network-adv.yml
---
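# Check node routing when Calico runs in IP-in-IP mode (or on a cloud
# provider): every /26 block route must go via the tunl0 tunnel interface,
# so the task fails if any /26 route bypasses tunl0.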
- hosts: kube_node
  tasks:
    - name: Test tunl0 routes
      shell: "set -o pipefail && ! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
      args:
        executable: /bin/bash
      when:
        - (ipip|default(true) or cloud_provider is defined)
        - kube_network_plugin|default('calico') == 'calico'
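
# Main connectivity check: wait for the netchecker server and agent pods,
# then query the netchecker server API from the first control plane node to
# verify that every agent (pod-network and host-network) has reported in.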
- hosts: k8s_cluster
  vars:
    agent_report_interval: 10
    netcheck_namespace: default
    netchecker_port: 31081
  tasks:
    - name: Force binaries directory for Container Linux by CoreOS and Flatcar
      set_fact:
        bin_dir: "/opt/bin"
      when: ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]

    - name: Force binaries directory on other hosts
      set_fact:
        bin_dir: "/usr/local/bin"
      when: not ansible_os_family in ["Flatcar", "Flatcar Container Linux by Kinvolk"]
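
    # Collect a cluster state dump (cluster-dump role) so that debugging
    # material is available if the checks below fail.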
    - import_role:  # noqa unnamed-task
        name: cluster-dump

    - name: Wait for netchecker server
      shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
      args:
        executable: /bin/bash
      register: ncs_pod
      until: ncs_pod.stdout.find('Running') != -1
      retries: 3
      delay: 10
      when: inventory_hostname == groups['kube_control_plane'][0]
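
    # Every node runs two agent pods, netchecker-agent and
    # netchecker-agent-hostnet, hence the "* 2" in the expected pod count.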
    - name: Wait for netchecker agents
      shell: "set -o pipefail && {{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
      args:
        executable: /bin/bash
      register: nca_pod
      until: nca_pod.stdout_lines|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
      retries: 3
      delay: 10
      failed_when: false
      when: inventory_hostname == groups['kube_control_plane'][0]

    - name: Get netchecker pods
      command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
      run_once: true
      delegate_to: "{{ groups['kube_control_plane'][0] }}"
      no_log: false
      with_items:
        - netchecker-agent
        - netchecker-agent-hostnet
      when: not nca_pod is success

    - debug:  # noqa unnamed-task
        var: nca_pod.stdout_lines
      failed_when: not nca_pod is success
      when: inventory_hostname == groups['kube_control_plane'][0]
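
    # Query the server API directly: /api/v1/agents/ returns a JSON object
    # with one entry per agent that has reported in, so the retry loop waits
    # until two entries per node are present.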
    - name: Get netchecker agents
      uri:
        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/"
        return_content: yes
      run_once: true
      delegate_to: "{{ groups['kube_control_plane'][0] }}"
      register: agents
      retries: 18
      delay: "{{ agent_report_interval }}"
      until: agents.content|length > 0 and
             agents.content[0] == '{' and
             agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2
      failed_when: false
      no_log: true

    - debug:  # noqa unnamed-task
        var: agents.content | from_json
      failed_when: not agents is success and not agents.content == '{}'
      run_once: true
      when:
        - agents.content is defined
        - agents.content
        - agents.content[0] == '{'
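
    # Ask the server for the overall connectivity check result; the response
    # body is parsed and shown by the debug tasks further down.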
    - name: Check netchecker status
      uri:
        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
        status_code: 200
        return_content: yes
      delegate_to: "{{ groups['kube_control_plane'][0] }}"
      run_once: true
      register: result
      retries: 3
      delay: "{{ agent_report_interval }}"
      until: result.content|length > 0 and
             result.content[0] == '{'
      no_log: true
      failed_when: false
      when:
        - agents.content != '{}'

    - debug:  # noqa unnamed-task
        var: ncs_pod
      run_once: true
      when: not result is success
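
    # On failure, collect kube-proxy and network plugin logs to help diagnose
    # the broken connectivity.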
    - name: Get kube-proxy logs
      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
      no_log: false
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
        - not result is success

    - name: Get logs from other apps
      command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
        - not result is success
      no_log: false
      with_items:
        - kube-router
        - flannel
        - canal-node
        - calico-node
        - cilium

    - debug:  # noqa unnamed-task
        var: result.content | from_json
      failed_when: not result is success
      run_once: true
      when:
        - not agents.content == '{}'
        - result.content
        - result.content[0] == '{'

    - debug:  # noqa unnamed-task
        var: result
      failed_when: not result is success
      run_once: true
      when:
        - not agents.content == '{}'

    - debug:  # noqa unnamed-task
        msg: "Cannot get reports from agents; considering the check as PASSING"
      run_once: true
      when:
        - agents.content == '{}'
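
    # The remaining tasks only run when Multus is enabled: create a macvlan
    # NetworkAttachmentDefinition, start a pod that requests it, and verify
    # that the pod gets a secondary interface.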
    - name: Create macvlan network conf
      # We cannot use only shell: below because Ansible will render the text
      # with leading spaces, which means the shell will never find the string
      # EOF at the beginning of a line. We can avoid Ansible's unhelpful
      # heuristics by using the cmd parameter like this:
      shell:
        cmd: |
          cat <<EOF | {{ bin_dir }}/kubectl create -f -
          apiVersion: "k8s.cni.cncf.io/v1"
          kind: NetworkAttachmentDefinition
          metadata:
            name: macvlan-conf
          spec:
            config: '{
              "cniVersion": "0.4.0",
              "type": "macvlan",
              "master": "eth0",
              "mode": "bridge",
              "ipam": {
                "type": "host-local",
                "subnet": "192.168.1.0/24",
                "rangeStart": "192.168.1.200",
                "rangeEnd": "192.168.1.216",
                "routes": [
                  { "dst": "0.0.0.0/0" }
                ],
                "gateway": "192.168.1.1"
              }
            }'
          EOF
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
        - kube_network_plugin_multus|default(false)|bool
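
    # The test pod requests the secondary network via the
    # k8s.v1.cni.cncf.io/networks annotation referencing macvlan-conf.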
    - name: Annotate pod with macvlan network
      # We cannot use only shell: below because Ansible will render the text
      # with leading spaces, which means the shell will never find the string
      # EOF at the beginning of a line. We can avoid Ansible's unhelpful
      # heuristics by using the cmd parameter like this:
      shell:
        cmd: |
          cat <<EOF | {{ bin_dir }}/kubectl create -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: samplepod
            annotations:
              k8s.v1.cni.cncf.io/networks: macvlan-conf
          spec:
            containers:
            - name: samplepod
              command: ["/bin/bash", "-c", "sleep 2000000000000"]
              image: dougbtv/centos-network
          EOF
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
        - kube_network_plugin_multus|default(false)|bool
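
    # Multus exposes the additional macvlan attachment as the net1 interface
    # inside the pod; poll until "ip addr show dev net1" succeeds.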
    - name: Check secondary macvlan interface
      command: "{{ bin_dir }}/kubectl exec samplepod -- ip addr show dev net1"
      register: output
      until: output.rc == 0
      retries: 90
      changed_when: false
      when:
        - inventory_hostname == groups['kube_control_plane'][0]
        - kube_network_plugin_multus|default(false)|bool