Merge pull request #138 from kubespray/integration_tests
Integration tests
This commit is contained in:
commit 451ee18c4a
19 changed files with 1581 additions and 26 deletions
154 .travis.yml

@@ -1,38 +1,142 @@
-sudo: required
-dist: trusty
-language: python
-python: "2.7"
+sudo: false

-addons:
-  hosts:
-  - node1
+git:
+  depth: 5

 env:
-  - SITE=cluster.yml ANSIBLE_VERSION=2.0.0
+  global:
+    GCE_USER=travis
+    SSH_USER=$GCE_USER
+    TEST_ID=$TRAVIS_JOB_NUMBER
+    CONTAINER_ENGINE=docker
+    PRIVATE_KEY=$GCE_PRIVATE_KEY
+    ANSIBLE_KEEP_REMOTE_FILES=1
+  matrix:
+    # Debian Jessie
+    - >-
+      KUBE_NETWORK_PLUGIN=flannel
+      CLOUD_IMAGE=debian-8
+      CLOUD_REGION=europe-west1-b
+    - >-
+      KUBE_NETWORK_PLUGIN=calico
+      CLOUD_IMAGE=debian-8
+      CLOUD_REGION=europe-west1-b
+    - >-
+      KUBE_NETWORK_PLUGIN=weave
+      CLOUD_IMAGE=debian-8
+      CLOUD_REGION=europe-west1-b
+
+    # Centos 7
+    - >-
+      KUBE_NETWORK_PLUGIN=flannel
+      CLOUD_IMAGE=centos-7-sudo
+      CLOUD_REGION=us-central1-c
+
+    # - >-
+    #   KUBE_NETWORK_PLUGIN=calico
+    #   CLOUD_IMAGE=centos-7-sudo
+    #   CLOUD_REGION=us-central1-c
+
+    # # Redhat 7
+    - >-
+      KUBE_NETWORK_PLUGIN=flannel
+      CLOUD_IMAGE=rhel-7-sudo
+      CLOUD_REGION=us-east1-d
+
+    # - >-
+    #   KUBE_NETWORK_PLUGIN=calico
+    #   CLOUD_IMAGE=rhel-7-sudo
+    #   CLOUD_REGION=us-east1-d
+
+    # Ubuntu 14.04
+    - >-
+      KUBE_NETWORK_PLUGIN=flannel
+      CLOUD_IMAGE=ubuntu-1404-trusty
+      CLOUD_REGION=europe-west1-c
+    - >-
+      KUBE_NETWORK_PLUGIN=calico
+      CLOUD_IMAGE=ubuntu-1404-trusty
+      CLOUD_REGION=europe-west1-c
+    - >-
+      KUBE_NETWORK_PLUGIN=weave
+      CLOUD_IMAGE=ubuntu-1404-trusty
+      CLOUD_REGION=europe-west1-c
+
+    # # Ubuntu 15.10
+    # - >-
+    #   KUBE_NETWORK_PLUGIN=flannel
+    #   CLOUD_IMAGE=ubuntu-1510-wily
+    #   CLOUD_REGION=us-central1-a
+    # - >-
+    #   KUBE_NETWORK_PLUGIN=calico
+    #   CLOUD_IMAGE=ubuntu-1510-wily
+    #   CLOUD_REGION=us-central1-a
+    # - >-
+    #   KUBE_NETWORK_PLUGIN=weave
+    #   CLOUD_IMAGE=ubuntu-1510-wily
+    #   CLOUD_REGION=us-central1-a
+
+matrix:
+  allow_failures:
+    - env: KUBE_NETWORK_PLUGIN=flannel CLOUD_IMAGE=centos-7-sudo CLOUD_REGION=us-central1-c
+    - env: KUBE_NETWORK_PLUGIN=flannel CLOUD_IMAGE=rhel-7-sudo CLOUD_REGION=us-east1-d
+    - env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=ubuntu-1404-trusty CLOUD_REGION=europe-west1-c

-install:
+before_install:
   # Install Ansible.
-  - sudo -H pip install ansible==${ANSIBLE_VERSION}
-  - sudo -H pip install netaddr
+  - pip install --user boto -U
+  - pip install --user ansible
+  - pip install --user netaddr
+  - pip install --user apache-libcloud

 cache:
-  directories:
-  - $HOME/releases
-  - $HOME/.cache/pip
+  - directories:
+    - $HOME/.cache/pip
+    - $HOME/.local

 before_script:
-  - export PATH=$PATH:/usr/local/bin
+  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
+  - mkdir -p $HOME/.ssh
+  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
+  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
+  - chmod 400 $HOME/.ssh/id_rsa
+  - chmod 755 $HOME/.local/bin/ansible-playbook
+  - $HOME/.local/bin/ansible-playbook --version
+  - cp tests/ansible.cfg .
+  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"

 script:
-  # Check the role/playbook's syntax.
-  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --syntax-check"
-
-  # Run the role/playbook with ansible-playbook.
-  - "sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local"
-
-  # Run the role/playbook again, checking to make sure it's idempotent.
   - >
-    sudo -H ansible-playbook -i inventory/local-tests.cfg $SITE --connection=local
-    | tee /dev/stderr | grep -q 'changed=0.*failed=0'
-    && (echo 'Idempotence test: pass' && exit 0)
-    || (echo 'Idempotence test: fail' && exit 1)
+    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
+    -e test_id=${TEST_ID}
+    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+    -e gce_project_id=${GCE_PROJECT_ID}
+    -e gce_service_account_email=${GCE_ACCOUNT}
+    -e gce_pem_file=${HOME}/.ssh/gce
+    -e cloud_image=${CLOUD_IMAGE}
+    -e inventory_path=${PWD}/inventory/inventory.ini
+    -e cloud_region=${CLOUD_REGION}
+
+  # Create cluster
+  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"
+
+  # Tests Cases
+  ## Test Master API
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+
+  ## Create a POD
+  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
+
+  ## Ping the between 2 pod
+  - $HOME/.local/bin/ansible-playbook -i setup-kubernetes/inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+
+after_script:
+  - >
+    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
+    -e test_id=${TEST_ID}
+    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
+    -e gce_project_id=${GCE_PROJECT_ID}
+    -e gce_service_account_email=${GCE_ACCOUNT}
+    -e gce_pem_file=${HOME}/.ssh/gce
+    -e cloud_image=${CLOUD_IMAGE}
+    -e inventory_path=${PWD}/inventory/inventory.ini
+    -e cloud_region=${CLOUD_REGION}
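The new `before_script` decodes `$PRIVATE_KEY` and `$GCE_PEM_FILE`, which are presumably stored as secret Travis variables holding base64-encoded credentials. A minimal sketch (not part of this PR) of how those values could be produced locally, assuming an existing SSH key pair and the GCE service-account `.p12` key described in tests/README.md below; all file names are placeholders:

```bash
# Encode the SSH private key used to reach the test VMs
# (decoded on Travis into ~/.ssh/id_rsa).
base64 -w0 ~/.ssh/id_rsa

# Convert the GCE service-account .p12 key to PEM and encode it
# (decoded on Travis into ~/.ssh/gce); the openssl pipeline is the one
# documented in tests/README.md.
openssl pkcs12 -in gce-secure.p12 -passin pass:notasecret -nodes -nocerts \
  | openssl rsa -out gce-secure.pem
base64 -w0 gce-secure.pem
```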
4 ansible.cfg Normal file

@@ -0,0 +1,4 @@
[ssh_connection]
pipelining=True
[defaults]
host_key_checking=False
@@ -1 +1 @@
-Subproject commit c1c0ce8e27b430cff8e5b0f1519707eb892f5e67
+Subproject commit 71c7bf98210e8907554a26e25cc9c2a3ece8cffd
33 tests/README.md Normal file

@@ -0,0 +1,33 @@
# k8s-integration-tests

*Work In Progress*

## Test environment variables

### Common

Variable              | Description                            | Required | Default
--------------------- | -------------------------------------- | -------- | --------
`TEST_ID`             | A unique execution ID for this test    | Yes      |
`KUBE_NETWORK_PLUGIN` | The network plugin (calico or flannel) | Yes      |
`PRIVATE_KEY_FILE`    | The path to the SSH private key file   | No       |

### AWS Tests

Variable              | Description                          | Required | Default
--------------------- | ------------------------------------ | -------- | ------------
`AWS_ACCESS_KEY`      | The Amazon Access Key ID             | Yes      |
`AWS_SECRET_KEY`      | The Amazon Secret Access Key         | Yes      |
`AWS_AMI_ID`          | The AMI ID to deploy                 | Yes      |
`AWS_KEY_PAIR_NAME`   | The name of the EC2 key pair to use  | Yes      |
`AWS_SECURITY_GROUP`  | The EC2 Security Group to use        | No       | default
`AWS_REGION`          | The EC2 region                       | No       | eu-central-1

#### Use private ssh key

##### Key

```bash
openssl pkcs12 -in gce-secure.p12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out gce-secure.pem
cat gce-secure.pem | base64 -w0 > GCE_PEM_FILE
```
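The variables in the tables above are read from the environment by the test scripts added later in this diff (tests/scripts/ansibl8s_test.sh). A hypothetical shell setup for an AWS run; every value below is a placeholder, not a real credential:

```bash
export TEST_ID=local-001
export KUBE_NETWORK_PLUGIN=flannel          # or calico
export PRIVATE_KEY_FILE=~/.ssh/id_rsa       # optional
export AWS_ACCESS_KEY=AKIAXXXXXXXXXXXXXXXX
export AWS_SECRET_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
export AWS_AMI_ID=ami-02724d1f              # Debian Jessie, the playbooks' default
export AWS_KEY_PAIR_NAME=my-ec2-keypair
export AWS_SECURITY_GROUP=default           # optional
export AWS_REGION=eu-central-1              # optional
```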
4 tests/ansible.cfg Normal file

@@ -0,0 +1,4 @@
[ssh_connection]
pipelining=True
[defaults]
host_key_checking=False
33 tests/cloud_playbooks/create-aws.yml Normal file

@@ -0,0 +1,33 @@
---
- hosts: localhost
  sudo: False
  gather_facts: False

  tasks:
    - name: Provision a set of instances
      ec2:
        key_name: "{{ aws.key_name }}"
        aws_access_key: "{{ aws.access_key }}"
        aws_secret_key: "{{ aws.secret_key }}"
        region: "{{ aws.region }}"
        group_id: "{{ aws.group }}"
        instance_type: "{{ aws.instance_type }}"
        image: "{{ aws.ami_id }}"
        wait: true
        count: "{{ aws.count }}"
        instance_tags: "{{ aws.tags }}"
      register: ec2

    - name: Template the inventory
      template:
        src: templates/inventory.ini.j2
        dest: "{{ inventory_path }}"

    - name: Wait until SSH is available
      local_action:
        module: wait_for
        host: "{{ item.public_ip }}"
        port: 22
        timeout: 300
        state: started
      with_items: ec2.instances
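A sketch of how this playbook could be driven locally (this exact invocation is not part of the PR); it relies on the `aws.*` defaults defined in tests/local_inventory/host_vars/localhost further down this diff, and the inventory path is a placeholder:

```bash
# Provision three EC2 instances and render the test inventory from
# tests/templates/inventory.ini.j2.
ansible-playbook tests/cloud_playbooks/create-aws.yml \
  -i tests/local_inventory/hosts.cfg -c local \
  -e test_id=${TEST_ID} \
  -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
  -e aws_access_key=${AWS_ACCESS_KEY} \
  -e aws_secret_key=${AWS_SECRET_KEY} \
  -e aws_ami_id=${AWS_AMI_ID} \
  -e key_name=${AWS_KEY_PAIR_NAME} \
  -e inventory_path=${PWD}/inventory/inventory.ini
```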
33 tests/cloud_playbooks/create-gce.yml Normal file

@@ -0,0 +1,33 @@
---
- hosts: localhost
  sudo: False
  gather_facts: no
  vars:
    cloud_machine_type: g1-small

  tasks:
    - name: replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"

    - name: Create gce instances
      gce:
        instance_names: "k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3"
        machine_type: "{{ cloud_machine_type }}"
        image: "{{ cloud_image }}"
        service_account_email: "{{ gce_service_account_email }}"
        pem_file: "{{ gce_pem_file }}"
        project_id: "{{ gce_project_id }}"
        zone: "{{ cloud_region }}"
        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}"}'
        tags: "build-{{ test_name }},{{ kube_network_plugin }}"
      register: gce

    - name: Template the inventory
      template:
        src: ../templates/inventory-gce.j2
        dest: "{{ inventory_path }}"

    - name: Wait for SSH to come up
      wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=180 state=started
      with_items: gce.instance_data
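For reference, this is the command the new `.travis.yml` above uses to drive the playbook, reformatted with one option per line; the variables come from the build matrix and from Travis secret variables:

```bash
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml \
  -i tests/local_inventory/hosts -c local $LOG_LEVEL \
  -e test_id=${TEST_ID} \
  -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
  -e gce_project_id=${GCE_PROJECT_ID} \
  -e gce_service_account_email=${GCE_ACCOUNT} \
  -e gce_pem_file=${HOME}/.ssh/gce \
  -e cloud_image=${CLOUD_IMAGE} \
  -e inventory_path=${PWD}/inventory/inventory.ini \
  -e cloud_region=${CLOUD_REGION}
```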
15 tests/cloud_playbooks/delete-aws.yml Normal file

@@ -0,0 +1,15 @@
---
- hosts: kube-node
  sudo: False

  tasks:
    - name: Gather EC2 facts
      action: ec2_facts

    - name: Terminate EC2 instances
      local_action:
        module: ec2
        state: absent
        instance_ids: "{{ ansible_ec2_instance_id }}"
        region: "{{ ansible_ec2_placement_region }}"
        wait: True
24 tests/cloud_playbooks/delete-gce.yml Normal file

@@ -0,0 +1,24 @@
---
- hosts: localhost
  sudo: False
  gather_facts: no
  vars:
    cloud_machine_type: f1-micro

  tasks:
    - name: replace_test_id
      set_fact:
        test_name: "{{ test_id | regex_replace('\\.', '-') }}"

    - name: delete gce instances
      gce:
        instance_names: "k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3"
        machine_type: "{{ cloud_machine_type }}"
        image: "{{ cloud_image }}"
        service_account_email: "{{ gce_service_account_email }}"
        pem_file: "{{ gce_pem_file }}"
        project_id: "{{ gce_project_id }}"
        zone: "{{ cloud_region | default('europe-west1-b') }}"
        metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}"}'
        state: 'absent'
      register: gce
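Teardown is symmetric with provisioning: the `after_script` section of the new `.travis.yml` calls this playbook with the same extra vars (Travis runs `after_script` whether the build passed or failed), so the instances created for a given `test_id` are removed:

```bash
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini \
  tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL \
  -e test_id=${TEST_ID} \
  -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
  -e gce_project_id=${GCE_PROJECT_ID} \
  -e gce_service_account_email=${GCE_ACCOUNT} \
  -e gce_pem_file=${HOME}/.ssh/gce \
  -e cloud_image=${CLOUD_IMAGE} \
  -e inventory_path=${PWD}/inventory/inventory.ini \
  -e cloud_region=${CLOUD_REGION}
```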
12 tests/local_inventory/host_vars/localhost Normal file

@@ -0,0 +1,12 @@
aws:
  key_name: "{{ key_name | default('ansibl8s') }}"
  access_key: "{{ aws_access_key }}"
  secret_key: "{{ aws_secret_key }}"
  region: "{{ aws_region | default('eu-west-1') }}"      # default to eu-west-1
  group: "{{ aws_security_group | default('default') }}"
  instance_type: t2.micro
  ami_id: "{{ aws_ami_id | default('ami-02724d1f') }}"   # default to Debian Jessie
  count: 3
  tags:
    test_id: "{{ test_id }}"
    network_plugin: "{{ kube_network_plugin }}"
1 tests/local_inventory/hosts.cfg Normal file

@@ -0,0 +1 @@
localhost ansible_connection=localx
8 tests/run-tests.sh Executable file

@@ -0,0 +1,8 @@
#! /bin/bash

# curl -# -C - -o shebang-unit https://raw.github.com/arpinum-oss/shebang-unit/master/releases/shebang-unit
# chmod +x shebang-unit

now=$(date +"%Y%m%d%H%M%S")
mkdir -p ${PWD}/tests-results
./shebang-unit --reporters=simple,junit --output-file=${PWD}/tests-results/junit_report-${now}.xml tests
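The commented lines above show where the runner comes from. A minimal usage sketch, assuming the script is run from the directory that holds `shebang-unit` and that the environment variables from tests/README.md are already exported:

```bash
# Fetch the shebang-unit runner (same URL as the comment above), make it
# executable, then run the suite; a JUnit report is written to ./tests-results/.
curl -# -C - -o shebang-unit \
  https://raw.github.com/arpinum-oss/shebang-unit/master/releases/shebang-unit
chmod +x shebang-unit
./run-tests.sh
```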
52 tests/scripts/ansibl8s_test.sh Normal file

@@ -0,0 +1,52 @@
#! /bin/bash

global_setup() {
  git clone https://github.com/ansibl8s/setup-kubernetes.git setup-kubernetes
  private_key=""
  if [ ! -z ${PRIVATE_KEY_FILE} ]
  then
    private_key="--private-key=${PRIVATE_KEY_FILE}"
  fi
  ansible-playbook create.yml -i hosts -u admin -s \
    -e test_id=${TEST_ID} \
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} \
    -e aws_access_key=${AWS_ACCESS_KEY} \
    -e aws_secret_key=${AWS_SECRET_KEY} \
    -e aws_ami_id=${AWS_AMI_ID} \
    -e aws_security_group=${AWS_SECURITY_GROUP} \
    -e key_name=${AWS_KEY_PAIR_NAME} \
    -e inventory_path=${PWD}/inventory.ini \
    -e aws_region=${AWS_REGION}
}

global_teardown() {
  if [ -f inventory.ini ];
  then
    ansible-playbook -i inventory.ini -u admin delete.yml
  fi
  rm -rf ${PWD}/setup-kubernetes
}

should_deploy_cluster() {
  ansible-playbook -i inventory.ini -s ${private_key} -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml

  assertion__status_code_is_success $?
}

should_api_server_respond() {
  ansible-playbook -i inventory.ini ${private_key} testcases/check-apiserver.yml

  assertion__status_code_is_success $?
}

should_create_pod() {
  ansible-playbook -i inventory.ini -s ${private_key} testcases/check-create-pod.yml -vv

  assertion__status_code_is_success $?
}

should_pod_be_in_expected_subnet() {
  ansible-playbook -i inventory.ini -s ${private_key} testcases/check-network.yml -vv

  assertion__status_code_is_success $?
}
1146 tests/shebang-unit Executable file

File diff suppressed because it is too large.
20 tests/templates/inventory-gce.j2 Normal file

@@ -0,0 +1,20 @@
node1 ansible_ssh_host={{gce.instance_data[0].public_ip}}
node2 ansible_ssh_host={{gce.instance_data[1].public_ip}}
node3 ansible_ssh_host={{gce.instance_data[2].public_ip}}

[kube-master]
node1
node2

[kube-node]
node1
node2
node3

[etcd]
node1
node2

[k8s-cluster:children]
kube-node
kube-master
20 tests/templates/inventory.ini.j2 Normal file

@@ -0,0 +1,20 @@
node1 ansible_ssh_host={{ec2.instances[0].public_ip}} ansible_ssh_user=admin
node2 ansible_ssh_host={{ec2.instances[1].public_ip}} ansible_ssh_user=admin
node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user=admin

[kube-master]
node1
node2

[kube-node]
node1
node2
node3

[etcd]
node1
node2

[k8s-cluster:children]
kube-node
kube-master
12 tests/testcases/010_check-apiserver.yml Normal file

@@ -0,0 +1,12 @@
---
- hosts: kube-master

  tasks:
    - name: Check the API servers are responding
      local_action:
        module: uri
        url: https://{{ ansible_ssh_host }}/api/v1
        user: kube
        password: changeme
        validate_certs: no
        status_code: 200
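The same check can be reproduced by hand; a sketch assuming `$MASTER_IP` is the address of a kube-master node, with the basic-auth pair and the skipped certificate validation mirroring the task above:

```bash
# -k mirrors validate_certs: no; the request is expected to return HTTP 200.
curl -k -u kube:changeme https://${MASTER_IP}/api/v1
```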
13 tests/testcases/020_check-create-pod.yml Normal file

@@ -0,0 +1,13 @@
---
- hosts: node1

  tasks:
    - name: Run a replica controller composed of 2 pods
      shell: "kubectl run test --image=busybox --replicas=2 --command -- tail -f /dev/null"

    - name: Pods are running
      shell: "kubectl get pods --no-headers -o json"
      register: run_pods_log
      until: (run_pods_log.stdout | from_json)['items'] | map(attribute = 'status.phase') | join(',') == "Running,Running"
      retries: 24
      delay: 5
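A rough manual equivalent of this test case, run on node1 (the loop below approximates the `until`/`retries: 24`/`delay: 5` polling above, reading the STATUS column instead of the JSON phase field):

```bash
# Start the replication controller, then poll until every pod reports Running.
kubectl run test --image=busybox --replicas=2 --command -- tail -f /dev/null
for i in $(seq 1 24); do
  statuses=$(kubectl get pods --no-headers | awk '{print $3}' | sort -u)
  [ "$statuses" = "Running" ] && break
  sleep 5
done
```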
21 tests/testcases/030_check-network.yml Normal file

@@ -0,0 +1,21 @@
---
- hosts: node1

  tasks:

    - name: Get pod names
      shell: "kubectl get pods -o json"
      register: pods

    - set_fact:
        pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
        pod_ips: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'status.podIP') | list }}"

    - name: Check pods IP are in correct network
      assert:
        that: item | ipaddr(kube_pods_subnet)
      with_items: pod_ips

    - name: Ping between pods is working
      shell: "kubectl exec {{pod_names[0]}} -- ping -c 4 {{ pod_ips[1] }}"
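Done manually, the connectivity check boils down to the following; the pod name and IP are hypothetical placeholders, whereas the playbook derives them from the `kubectl get pods -o json` output:

```bash
# Ping the second pod from inside the first pod across the overlay network.
POD_A=test-xxxxx
POD_B_IP=10.233.64.5
kubectl exec ${POD_A} -- ping -c 4 ${POD_B_IP}
```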