Add synthetic scale deployment mode

New deploy modes: scale, ha-scale, separate-scale
Adds 200 fake hosts with fake hostvars to the test inventory.

Useful for testing certificate generation and propagation to other
master nodes.

Updated test case descriptions.
Matthew Mosesohn 2017-02-13 14:13:28 +03:00
parent b84cc14694
commit 9c1701f2aa
7 changed files with 71 additions and 24 deletions
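
The key mechanism is the `--limit "all:!fake_hosts"` flag added to every CI invocation below: the fake hosts exist only in the inventory, so per-host data such as certificates can still be generated for them (as the commit message notes), while the plays themselves never connect to them. A minimal sketch of such a run; the inventory path and playbook mirror the CI commands, and the extra `-e` variables passed in CI are omitted:

    # Sketch only: deploy against a scale-mode inventory while excluding
    # the 200 synthetic hosts, exactly as the CI jobs in this commit do.
    ansible-playbook -i inventory/inventory.ini \
      --limit "all:!fake_hosts" \
      cluster.yml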

.gitignore

@@ -1,6 +1,7 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/group_vars/fake_hosts.yml
 temp
 .idea
 .tox


@@ -124,6 +124,7 @@ before_script:
 -e local_release_dir=${PWD}/downloads
 -e resolvconf_mode=${RESOLVCONF_MODE}
 -e vault_deployment_type=${VAULT_DEPLOYMENT}
+--limit "all:!fake_hosts"
 cluster.yml

 # Repeat deployment if testing upgrade
@@ -150,18 +151,19 @@ before_script:
 -e resolvconf_mode=${RESOLVCONF_MODE}
 -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
 -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
+--limit "all:!fake_hosts"
 $PLAYBOOK;
 fi

 # Tests Cases
 ## Test Master API
-- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL

 ## Ping the between 2 pod
-- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
+- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL

 ## Advanced DNS checks
-- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL

 ## Idempotency checks 1/5 (repeat deployment)
 - >
@@ -178,6 +180,7 @@ before_script:
 -e local_release_dir=${PWD}/downloads
 -e etcd_deployment_type=${ETCD_DEPLOYMENT}
 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+--limit "all:!fake_hosts"
 cluster.yml;
 fi
@@ -186,6 +189,7 @@ before_script:
 if [ "${IDEMPOT_CHECK}" = "true" ]; then
 ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
 -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+--limit "all:!fake_hosts"
 tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
 fi
@@ -197,7 +201,8 @@ before_script:
 --private-key=${HOME}/.ssh/id_rsa
 -e bootstrap_os=${BOOTSTRAP_OS}
 -e ansible_python_interpreter=${PYPATH}
 -e reset_confirmation=yes
+--limit "all:!fake_hosts"
 reset.yml;
 fi
@@ -216,6 +221,7 @@ before_script:
 -e local_release_dir=${PWD}/downloads
 -e etcd_deployment_type=${ETCD_DEPLOYMENT}
 -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
+--limit "all:!fake_hosts"
 cluster.yml;
 fi
@@ -224,6 +230,7 @@ before_script:
 if [ "${IDEMPOT_CHECK}" = "true" ]; then
 ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
 -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
+--limit "all:!fake_hosts"
 tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
 fi
@@ -244,7 +251,7 @@ before_script:
 .coreos_calico_sep_variables: &coreos_calico_sep_variables
 # stage: deploy-gce-part1
 KUBE_NETWORK_PLUGIN: calico
 CLOUD_IMAGE: coreos-stable-1235-6-0-v20170111
 CLOUD_REGION: us-west1-b
 CLUSTER_MODE: separate
 BOOTSTRAP_OS: coreos
@@ -310,7 +317,7 @@ before_script:
 KUBE_NETWORK_PLUGIN: calico
 CLOUD_IMAGE: centos-7
 CLOUD_REGION: europe-west1-b
-CLUSTER_MODE: ha
+CLUSTER_MODE: ha-scale
 IDEMPOT_CHECK: "true"

 .coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables


@@ -4,25 +4,40 @@ Travis CI test matrix
 GCE instances
 -------------

-Here is the test matrix for the Travis CI gates:
+Here is the test matrix for the CI gates:

 | Network plugin| OS type| GCE region| Nodes layout|
 |-------------------------|-------------------------|-------------------------|-------------------------|
-| canal| debian-8-kubespray| asia-east1-a| ha|
+| canal| debian-8-kubespray| asia-east1-a| ha-scale|
 | calico| debian-8-kubespray| europe-west1-c| default|
 | flannel| centos-7| asia-northeast1-c| default|
 | calico| centos-7| us-central1-b| ha|
 | weave| rhel-7| us-east1-c| default|
-| canal| coreos-stable| us-west1-b| default|
+| canal| coreos-stable| us-west1-b| ha-scale|
 | canal| rhel-7| asia-northeast1-b| separate|
 | weave| ubuntu-1604-xenial| europe-west1-d| separate|
 | calico| coreos-stable| us-central1-f| separate|
-Where the nodes layout `default` is a non-HA two nodes setup with the separate `kube-node`
-and the `etcd` group merged with the `kube-master`. The `separate` layout is when
-there is only node of each type, which is a kube master, compute and etcd cluster member.
-And the `ha` layout stands for a two etcd nodes, two masters and a single worker node,
-partially intersecting though.
+Node Layouts
+------------
+
+There are four node layout types: `default`, `separate`, `ha`, and `scale`.
+
+`default` is a non-HA two nodes setup with one separate `kube-node`
+and the `etcd` group merged with the `kube-master`.
+
+`separate` layout is when there is only node of each type, which includes
+a kube-master, kube-node, and etcd cluster member.
+
+`ha` layout consists of two etcd nodes, two masters and a single worker node,
+with role intersection.
+
+`scale` layout can be combined with above layouts. It includes 200 fake hosts
+in the Ansible inventory. This helps test TLS certificate generation at scale
+to prevent regressions and profile certain long-running tasks. These nodes are
+never actually deployed, but certificates are generated for them.

 Note, the canal network plugin deploys flannel as well plus calico policy controller.
@@ -40,15 +55,15 @@ GCE instances
 | Stage| Network plugin| OS type| GCE region| Nodes layout
 |--------------------|--------------------|--------------------|--------------------|--------------------|
-| part1| calico| coreos-stable| us-west1-b| separated|
+| part1| calico| coreos-stable| us-west1-b| separate|
 | part1| canal| debian-8-kubespray| us-east1-b| ha|
-| part1| weave| rhel-7| europe-west1-b| default|
+| part1| weave| rhel-7| europe-west1-b| scale|
 | part2| flannel| centos-7| us-west1-a| default|
 | part2| calico| debian-8-kubespray| us-central1-b| default|
 | part2| canal| coreos-stable| us-east1-b| default|
-| special| canal| rhel-7| us-east1-b| separated|
+| special| canal| rhel-7| us-east1-b| separate|
-| special| weave| ubuntu-1604-xenial| us-central1-b| separated|
+| special| weave| ubuntu-1604-xenial| us-central1-b| scale|
-| special| calico| centos-7| europe-west1-b| ha|
+| special| calico| centos-7| europe-west1-b| ha-scale|
 | special| weave| coreos-alpha| us-west1-a| ha|

 The "Stage" means a build step of the build pipeline. The steps are ordered as `part1->part2->special`.
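
For the scale layouts, the only real difference is what ends up in the generated inventory. A rough, illustrative sketch of the tail of an inventory rendered for a scale-type run (host names follow the `k8s-{{test_name}}-N` pattern from the test templates further down; the addresses are placeholders, and the real group assignments come from the inventory-gce.j2 template):

    # illustrative fragment, not part of the commit
    k8s-test-1 ansible_ssh_host=10.0.0.11
    k8s-test-2 ansible_ssh_host=10.0.0.12

    [fake_hosts]
    fake_scale_host[1:200]

    [kube-node:children]
    fake_hosts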


@@ -2,6 +2,7 @@
 - name: "Check_certs | check if all certs have already been generated on first master"
   stat:
     path: "{{ etcd_cert_dir }}/{{ item }}"
+    get_md5: no
   delegate_to: "{{groups['etcd'][0]}}"
   register: etcdcert_master
   run_once: true


@@ -13,7 +13,7 @@
 - set_fact:
     instance_names: >-
-      {%- if mode in ['separate', 'ha'] -%}
+      {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
       k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
       {%- else -%}
       k8s-{{test_name}}-1,k8s-{{test_name}}-2
@@ -39,6 +39,18 @@
     src: ../templates/inventory-gce.j2
     dest: "{{ inventory_path }}"

+- name: Make group_vars directory
+  file:
+    path: "{{ inventory_path|dirname }}/group_vars"
+    state: directory
+  when: mode in ['scale', 'separate-scale', 'ha-scale']
+
+- name: Template fake hosts group vars
+  template:
+    src: ../templates/fake_hosts.yml.j2
+    dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
+  when: mode in ['scale', 'separate-scale', 'ha-scale']
+
 - name: Wait for SSH to come up
   wait_for: host={{item.public_ip}} port=22 delay=10 timeout=180 state=started
   with_items: "{{gce.instance_data}}"


@@ -0,0 +1,3 @@
+ansible_default_ipv4:
+  address: 255.255.255.255
+ansible_hostname: "{{ '{{' }}inventory_hostname}}"
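
The `{{ '{{' }}` in the last line escapes the opening braces, so the rendered group_vars file still contains a live Jinja expression that Ansible resolves per host at runtime; each fake host therefore reports its own inventory name as `ansible_hostname`. The generated `group_vars/fake_hosts.yml` should look like this:

    # rendered output of the template above
    ansible_default_ipv4:
      address: 255.255.255.255
    ansible_hostname: "{{inventory_hostname}}"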


@@ -2,12 +2,11 @@
 {% set node2 = gce.instance_data[1].name %}
 {{node1}} ansible_ssh_host={{gce.instance_data[0].public_ip}}
 {{node2}} ansible_ssh_host={{gce.instance_data[1].public_ip}}
-{% if mode is defined and mode in ["separate", "ha"] %}
+{% if mode is defined and mode in ["separate", "separate-scale", "ha", "ha-scale"] %}
 {% set node3 = gce.instance_data[2].name %}
 {{node3}} ansible_ssh_host={{gce.instance_data[2].public_ip}}
 {% endif %}
-
-{% if mode is defined and mode == "separate" %}
+{% if mode is defined and mode in ["separate", "separate-scale"] %}
 [kube-master]
 {{node1}}
@@ -19,7 +18,7 @@
 [vault]
 {{node3}}

-{% elif mode is defined and mode == "ha" %}
+{% elif mode is defined and mode in ["ha", "ha-scale"] %}
 [kube-master]
 {{node1}}
 {{node2}}
@@ -51,3 +50,12 @@
 [k8s-cluster:children]
 kube-node
 kube-master
+
+{% if mode is defined and mode in ["scale", "separate-scale", "ha-scale"] %}
+[fake_hosts]
+fake_scale_host[1:200]
+
+[kube-node:children]
+fake_hosts
+{% endif %}
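
`fake_scale_host[1:200]` uses Ansible's inventory host-range syntax, which expands inclusively to `fake_scale_host1` through `fake_scale_host200`, the 200 fake hosts mentioned in the commit message. A quick sanity check against a generated inventory (path is illustrative):

    # should list fake_scale_host1 .. fake_scale_host200
    ansible -i inventory/inventory.ini fake_hosts --list-hosts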