Add digitalocean test case

Antoine Legrand authored on 2017-02-22 14:27:30 +01:00; committed by Antoine Legrand
parent 442d211ee3
commit 3ef7c25a16
3 changed files with 254 additions and 22 deletions

.gitlab-ci.yml

@@ -1,4 +1,5 @@
stages:
- deploy-do-part1
- moderator
- unit-tests
- deploy-gce-part1
@@ -235,6 +236,178 @@ before_script:
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e inventory_path=${PWD}/inventory/sample/hosts.ini
.do: &do
<<: *job
<<: *gce
cache:
key: "$CI_BUILD_REF_NAME"
paths:
- downloads/
- $HOME/.cache
before_script:
- docker info
- pip install ansible==2.2.1.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- pip install dopy==0.3.5
- ansible-playbook --version
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
script:
- pwd
- ls
- echo ${PWD}
- >
ansible-playbook tests/cloud_playbooks/create-do.yml -i tests/local_inventory/hosts.cfg -c local
${LOG_LEVEL}
-e cloud_image=${CLOUD_IMAGE}
-e cloud_region=${CLOUD_REGION}
-e inventory_path=${PWD}/inventory/inventory.ini
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
# Check out latest tag if testing upgrade
# Uncomment when gitlab kargo repo has tags
#- test "${UPGRADE_TEST}" = "true" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
- test "${UPGRADE_TEST}" = "true" && git checkout 031cf565ec3ccd3ebbe80eeef3454c3780e5c598 && pip install ansible==2.2.0
# Create cluster
- >
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e state=present
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cert_management=${CERT_MGMT:-script}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=true
-e download_run_once=true
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e vault_deployment_type=${VAULT_DEPLOYMENT}
cluster.yml
# Repeat deployment if testing upgrade
#FIXME(mattymo): repeat "Create cluster" above without duplicating code
- >
if [ "${UPGRADE_TEST}" = "true" ]; then
pip install ansible==2.2.1.0;
git checkout "${CI_BUILD_REF}";
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
${SSH_ARGS}
${LOG_LEVEL}
-e ansible_python_interpreter=${PYPATH}
-e ansible_ssh_user=${SSH_USER}
-e bootstrap_os=${BOOTSTRAP_OS}
-e cloud_provider=gce
-e deploy_netchecker=true
-e download_localhost=true
-e download_run_once=true
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
-e local_release_dir=${PWD}/downloads
-e resolvconf_mode=${RESOLVCONF_MODE}
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
cluster.yml;
fi
# Test cases
## Test Master API
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping between two pods
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 2/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
## Idempotency checks 3/5 (reset deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
reset.yml;
fi
## Idempotency checks 4/5 (redeploy after reset)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
--private-key=${HOME}/.ssh/id_rsa
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e deploy_netchecker=true
-e resolvconf_mode=${RESOLVCONF_MODE}
-e local_release_dir=${PWD}/downloads
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
cluster.yml;
fi
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
fi
after_script:
- >
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/create-do.yml -c local $LOG_LEVEL
-e state=absent
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_credentials_file=${HOME}/.ssh/gce.json
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
# stage: deploy-gce-part1
@@ -312,6 +485,19 @@ coreos-calico-aio:
except: ['triggers']
only: [/^pr-.*$/]
coreos-calico-sep-do:
stage: deploy-do-part1
<<: *job
<<: *do
variables:
<<: *gce_variables
<<: *coreos_calico_sep_variables
CLOUD_IMAGE: coreos-stable
CLOUD_REGION: NYC3
when: on_success
except: ['triggers']
only: [/^pr-.*$/]
coreos-calico-sep-triggers:
stage: deploy-gce-part1
<<: *job
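For reference, a further DigitalOcean test case could be composed from the same `.do` and variable anchors, mirroring coreos-calico-sep-do above. The sketch below is illustrative only and not part of this commit; the job name, network plugin, cluster mode, image slug and region are assumptions (the slug and region are taken from the lists in tests/cloud_playbooks/create-do.yml).

# Hypothetical example only; not part of this commit
ubuntu-canal-sep-do:
  stage: deploy-do-part1
  <<: *job
  <<: *do
  variables:
    <<: *gce_variables
    KUBE_NETWORK_PLUGIN: canal        # assumed plugin choice
    CLUSTER_MODE: separate            # assumed mode (default/separate/ha)
    CLOUD_IMAGE: ubuntu-16-04-x64     # slug from the cloud_images list
    CLOUD_REGION: ams3                # region from the regions list
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]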

tests/cloud_playbooks/create-do.yml

@@ -3,7 +3,50 @@
become: false
gather_facts: no
vars:
cloud_machine_type: g1-small
state: "present"
ssh_key_id: "6536865"
cloud_machine_type: 2gb
regions:
- nyc1
- sfo1
- nyc2
- ams2
- sgp1
- lon1
- nyc3
- ams3
- fra1
- tor1
- sfo2
- blr1
cloud_images:
- coreos-beta
- fedora-24-x64
- centos-5-x64
- centos-5-x32
- fedora-25-x64
- debian-7-x64
- debian-7-x32
- debian-8-x64
- debian-8-x32
- centos-6-x32
- centos-6-x64
- coreos-stable
- ubuntu-16-10-x32
- ubuntu-16-10-x64
- freebsd-11-0-x64-zfs
- freebsd-10-3-x64-zfs
- coreos-alpha
- ubuntu-12-04-x32
- ubuntu-12-04-x64
- ubuntu-16-04-x64
- ubuntu-16-04-x32
- ubuntu-14-04-x64
- ubuntu-14-04-x32
- centos-7-x64
- freebsd-11-0-x64
- freebsd-10-3-x64
- centos-7-3-1611-x64
mode: default
tasks:
@@ -19,7 +62,7 @@
["k8s-{{test_name}}-1", "k8s-{{test_name}}-2"]
{%- endif -%}
- name: Create DO instances
- name: Manage DO instances | {{state}}
digital_ocean:
unique_name: yes
api_token: "{{ lookup('env','DO_API_TOKEN') }}"
@@ -28,21 +71,24 @@
name: "{{ item }}"
private_networking: no
region_id: "{{cloud_region}}"
size_id: 2gb
ssh_key_ids: "6536865"
state: present
size_id: "{{cloud_machine_type}}"
ssh_key_ids: "{{ssh_key_id}}"
state: "{{state}}"
wait: yes
register: droplets
with_items: "{{instance_names}}"
- debug:
msg: "{{droplets}}, {{inventory_path}}"
when: "{{ state == 'present' }}"
- name: Template the inventory
template:
src: ../templates/inventory-do.j2
dest: "{{ inventory_path }}/inventory-do.cfg"
dest: "{{ inventory_path }}"
when: "{{ state == 'present' }}"
- name: Wait for SSH to come up
wait_for: host={{item.droplet.ip_address}} port=22 delay=10 timeout=180 state=started
with_items: "{{droplets.results}}"
when: "{{ state == 'present' }}"
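As a rough guide to exercising this playbook outside CI, the commands below mirror the flags the .do job passes; the image, region, mode and test_id values are placeholders chosen for illustration, and DO_API_TOKEN must be exported in the environment because the play reads it with lookup('env','DO_API_TOKEN'). Only the DigitalOcean-relevant variables are shown.

# Sketch only; all values are placeholders
export DO_API_TOKEN=<your token>   # required by the api_token lookup; never commit it

# Create the test droplets and write the generated inventory
ansible-playbook tests/cloud_playbooks/create-do.yml -i tests/local_inventory/hosts.cfg -c local \
  -e state=present \
  -e cloud_image=ubuntu-16-04-x64 \
  -e cloud_region=nyc3 \
  -e mode=default \
  -e test_id=do-smoke-test \
  -e kube_network_plugin=calico \
  -e inventory_path=${PWD}/inventory/inventory.ini

# Tear the droplets down again when finished
ansible-playbook tests/cloud_playbooks/create-do.yml -i tests/local_inventory/hosts.cfg -c local \
  -e state=absent \
  -e cloud_image=ubuntu-16-04-x64 \
  -e cloud_region=nyc3 \
  -e mode=default \
  -e test_id=do-smoke-test \
  -e kube_network_plugin=calico \
  -e inventory_path=${PWD}/inventory/inventory.ini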

tests/templates/inventory-do.j2

@@ -1,46 +1,46 @@
{% for instance in droplets.results %}
node{{loop.index}} ansible_ssh_host={{instance.droplet.ip_address}}
{{instance.droplet.name}} ansible_ssh_host={{instance.droplet.ip_address}}
{% endfor %}
{% if mode is defined and mode == "separate" %}
[kube-master]
node1
{{droplets.results[0].droplet.name}}
[kube-node]
node2
{{droplets.results[1].droplet.name}}
[etcd]
node3
{{droplets.results[2].droplet.name}}
[vault]
node3
{{droplets.results[2].droplet.name}}
{% elif mode is defined and mode == "ha" %}
[kube-master]
node1
node2
{{droplets.results[0].droplet.name}}
{{droplets.results[1].droplet.name}}
[kube-node]
node3
{{droplets.results[2].droplet.name}}
[etcd]
node2
node3
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}
[vault]
node2
node3
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}
{% else %}
[kube-master]
node1
{{droplets.results[0].droplet.name}}
[kube-node]
node2
{{droplets.results[1].droplet.name}}
[etcd]
node1
{{droplets.results[0].droplet.name}}
[vault]
node1
{{droplets.results[0].droplet.name}}
{% endif %}
[k8s-cluster:children]
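To make the template's output concrete, here is a hypothetical rendering for mode=separate, assuming three droplets named k8s-example-1 through k8s-example-3 with documentation-range IP addresses; real names and addresses come from the digital_ocean results, and the trailing [k8s-cluster:children] groups are left out of this sketch.

# Hypothetical rendering for mode=separate (names and IPs are placeholders)
k8s-example-1 ansible_ssh_host=203.0.113.11
k8s-example-2 ansible_ssh_host=203.0.113.12
k8s-example-3 ansible_ssh_host=203.0.113.13

[kube-master]
k8s-example-1

[kube-node]
k8s-example-2

[etcd]
k8s-example-3

[vault]
k8s-example-3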