Merge pull request #726 from bogdando/netcheck_ci

Enable netchecker for CI
Matthew Mosesohn authored 2016-12-14 17:19:30 +03:00, committed by GitHub
commit e6fe9d5807
4 changed files with 115 additions and 13 deletions


@@ -127,7 +127,7 @@ script:
     -e inventory_path=${PWD}/inventory/inventory.ini
     -e cloud_region=${CLOUD_REGION}
-  # Create cluster
+  # Create cluster with netchecker app deployed
   - >
     $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
     -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
@@ -136,15 +136,16 @@ script:
     -e download_run_once=true
     -e download_localhost=true
     -e local_release_dir=/var/tmp/releases
+    -e deploy_netchecker=true
     cluster.yml

   # Tests Cases
   ## Test Master API
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-  ## Create a POD
-  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
   ## Ping the between 2 pod
   - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+  ## Advanced DNS checks
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL

 after_script:
   - >
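Taken together, the CI configuration changes deploy the netchecker app during cluster creation and then exercise it as a test case. A minimal local replay of the added steps might look like the sketch below; the inventory path and privilege-escalation flags mirror the diff, but treat the exact invocation as illustrative rather than a documented workflow:

    # Deploy a cluster with netchecker enabled, then run the new advanced check.
    # Inventory path and escalation settings are stand-ins for a local setup.
    ansible-playbook -i inventory/inventory.ini -b --become-user=root \
      -e deploy_netchecker=true cluster.yml
    ansible-playbook -i inventory/inventory.ini -b --become-user=root \
      tests/testcases/040_check-network-adv.yml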


@@ -39,14 +39,14 @@ should_api_server_respond() {
     assertion__status_code_is_success $?
 }

-should_create_pod() {
-    ansible-playbook -i inventory.ini -s ${private_key} testcases/020_check-create-pod.yml -vv
-    assertion__status_code_is_success $?
-}
-
 should_pod_be_in_expected_subnet() {
     ansible-playbook -i inventory.ini -s ${private_key} testcases/030_check-network.yml -vv
     assertion__status_code_is_success $?
 }
+
+should_resolve_cluster_dns() {
+    ansible-playbook -i inventory.ini -s ${private_key} testcases/040_check-network-adv.yml -vv
+    assertion__status_code_is_success $?
+}
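assertion__status_code_is_success comes from the repository's bash test harness and is not part of this diff. A hypothetical minimal equivalent, shown only to make the pattern readable:

    # Hypothetical stand-in for the harness helper used above (not from this PR):
    # fail the test when the exit code captured via $? is non-zero.
    assertion__status_code_is_success() {
        local status="$1"
        if [ "${status}" -ne 0 ]; then
            echo "expected exit code 0, got ${status}" >&2
            return 1
        fi
    }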


@@ -15,16 +15,44 @@
 - name: Get pod names
   shell: "{{bin_dir}}/kubectl get pods -o json"
   register: pods
+  no_log: true
+
+- name: Get hostnet pods
+  command: "{{bin_dir}}/kubectl get pods -o
+    jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {end}'"
+  register: hostnet_pods
+
+- name: Get running pods
+  command: "{{bin_dir}}/kubectl get pods -o
+    jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {end}'"
+  register: running_pods

 - set_fact:
     pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
-    pod_ips: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'status.podIP') | list }}"
+    pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
+    pods_hostnet: |
+      {% set list = hostnet_pods.stdout.split(" ") %}
+      {{list}}
+    pods_running: |
+      {% set list = running_pods.stdout.split(" ") %}
+      {{list}}

 - name: Check pods IP are in correct network
   assert:
     that: item | ipaddr(kube_pods_subnet)
+  when: not item in pods_hostnet and item in pods_running
   with_items: "{{pod_ips}}"

 - name: Ping between pods is working
-  shell: "{{bin_dir}}/kubectl exec {{pod_names[0]}} -- ping -c 4 {{ pod_ips[1] }}"
+  shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+  when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
+  with_nested:
+    - "{{pod_names}}"
+    - "{{pod_ips}}"
+
+- name: Ping between hostnet pods is working
+  shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+  when: item[0] in pods_hostnet and item[1] in pods_hostnet
+  with_nested:
+    - "{{pod_names}}"
+    - "{{pod_ips}}"


@@ -0,0 +1,73 @@
+---
+- hosts: kube-node
+  tasks:
+    - name: Test tunl0 routes
+      shell: "! /sbin/ip ro | grep '/26 via' | grep -v tunl0"
+      when: (ipip|default(false) or cloud_provider is defined) and (kube_network_plugin == 'calico')
+
+- hosts: k8s-cluster
+  vars:
+    agent_report_interval: 10
+    netcheck_namespace: default
+    netchecker_port: 31081
+  tasks:
+    - name: Force binaries directory for CoreOS
+      set_fact:
+        bin_dir: "/opt/bin"
+      when: ansible_os_family == "CoreOS"
+
+    - set_fact:
+        bin_dir: "/usr/local/bin"
+      when: ansible_os_family != "CoreOS"
+
+    - name: Wait for netchecker server
+      shell: "{{ bin_dir }}/kubectl get pods --namespace {{netcheck_namespace}} | grep ^netchecker-server"
+      delegate_to: "{{groups['kube-master'][0]}}"
+      run_once: true
+      register: ncs_pod
+      until: ncs_pod.stdout.find('Running') != -1
+      retries: 3
+      delay: 10
+
+    - name: Wait for netchecker agents
+      shell: "{{ bin_dir }}/kubectl get pods --namespace {{netcheck_namespace}} | grep '^netchecker-agent-.*Running'"
+      run_once: true
+      delegate_to: "{{groups['kube-master'][0]}}"
+      register: nca_pod
+      until: "{{ nca_pod.stdout_lines|length }} >= {{ groups['kube-node']|length * 2 }}"
+      retries: 3
+      delay: 10
+
+    - name: Get netchecker agents
+      uri: url=http://localhost:{{netchecker_port}}/api/v1/agents/ return_content=yes
+      run_once: true
+      delegate_to: "{{groups['kube-node'][0]}}"
+      register: agents
+      retries: 3
+      delay: "{{ agent_report_interval }}"
+      until: "{{ agents.content|length > 0 and
+                 agents.content[0] == '{' and
+                 agents.content|from_json|length >= groups['kube-node']|length * 2 }}"
+      ignore_errors: true
+      no_log: true
+
+    - debug: var=agents.content|from_json
+      failed_when: not agents|success
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
+
+    - name: Check netchecker status
+      uri: url=http://localhost:{{netchecker_port}}/api/v1/connectivity_check status_code=200 return_content=yes
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
+      register: result
+      retries: 3
+      delay: "{{ agent_report_interval }}"
+      no_log: true
+      ignore_errors: true
+
+    - debug: var=result.content|from_json
+      failed_when: not result|success
+      delegate_to: "{{groups['kube-node'][0]}}"
+      run_once: true
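The new playbook polls netchecker's HTTP API through the NodePort (31081, per the playbook vars) on the first kube-node: first that enough agents have reported in (two per node, one hostnet and one pod-network agent), then that the server judges connectivity healthy. The same two endpoints can be probed by hand; the response handling below is illustrative:

    # Manual equivalent of the two uri tasks above.
    # List agents that have reported in (expect >= 2 entries per kube-node):
    curl -s http://localhost:31081/api/v1/agents/
    # Ask the server for the overall verdict; HTTP 200 means all agents are
    # connected, matching the playbook's status_code=200 assertion.
    curl -s -o /dev/null -w '%{http_code}\n' \
      http://localhost:31081/api/v1/connectivity_check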