Fix CI upgrade scenario by using dynamic inventory file (#2635)

Also updates the commit ID we use as a basis for upgrade tests.
Matthew Mosesohn 2018-04-10 16:02:33 +03:00 committed by GitHub
parent 45f15bf753
commit 09f93d9e0c
3 changed files with 16 additions and 15 deletions
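In short: each CI job now writes its own inventory file, named after the job and build, instead of every job sharing inventory/sample/hosts.ini. A minimal shell sketch of how the path resolves; both variables are assumed to come from the CI environment, and the values below are made up for illustration:

CI_JOB_NAME="gce_ubuntu-canal-ha"    # assumed job name, illustrative only
BUILD_NUMBER="142"                   # assumed build number, illustrative only
ANSIBLE_INVENTORY=./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
echo "${ANSIBLE_INVENTORY}"          # -> ./inventory/sample/gce_ubuntu-canal-ha-142.ini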

.gitlab-ci.yml

@@ -20,6 +20,7 @@ variables:
GCE_PREEMPTIBLE: "false"
ANSIBLE_KEEP_REMOTE_FILES: "1"
ANSIBLE_CONFIG: ./tests/ansible.cfg
+ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
IDEMPOT_CHECK: "false"
RESET_CHECK: "false"
UPGRADE_TEST: "false"
@@ -90,9 +91,9 @@ before_script:
- cd tests && make create-${CI_PLATFORM} -s ; cd -
# Check out latest tag if testing upgrade
-# Uncomment when gitlab kargo repo has tags
+# Uncomment when gitlab kubespray repo has tags
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
-- test "${UPGRADE_TEST}" != "false" && git checkout ba0a03a8ba2d97a73d06242ec4bb3c7e2012e58c
+- test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
# Checkout the CI vars file so it is available
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
@@ -102,7 +103,7 @@ before_script:
# Create cluster
- >
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -121,7 +122,7 @@ before_script:
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
git checkout "${CI_BUILD_REF}";
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -137,20 +138,20 @@ before_script:
# Test Cases
## Test Master API
- >
-ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
+ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
## Ping between 2 pods
-- ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
+- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
-- ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
+- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
## Idempotency checks 1/5 (repeat deployment)
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -167,7 +168,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" ]; then
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -182,7 +183,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -199,7 +200,7 @@ before_script:
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
ansible-playbook
--i inventory/sample/hosts.ini
+-i ${ANSIBLE_INVENTORY}
-b --become-user=root
--private-key=${HOME}/.ssh/id_rsa
-u $SSH_USER
@@ -215,7 +216,7 @@ before_script:
## Idempotency checks 5/5 (Advanced DNS checks)
- >
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
-ansible-playbook -i inventory/sample/hosts.ini -e ansible_python_interpreter=${PYPATH}
+ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
--limit "all:!fake_hosts"
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
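
Every ansible-playbook invocation in this file now takes -i ${ANSIBLE_INVENTORY} in place of the shared inventory/sample/hosts.ini, so concurrent CI jobs can no longer clobber each other's inventory. (Ansible also reads ANSIBLE_INVENTORY from the environment when no -i flag is given, so the explicit flag is redundant but harmless.) A sketch of one resolved call; the inventory value is made up and cluster.yml stands in for whichever playbook the job actually runs:

export ANSIBLE_INVENTORY=./inventory/sample/gce_ubuntu-canal-ha-142.ini   # made-up value
ansible-playbook \
  -i "${ANSIBLE_INVENTORY}" \
  -b --become-user=root \
  --private-key="${HOME}/.ssh/id_rsa" \
  -u "${SSH_USER}" \
  cluster.yml   # placeholder playbook name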

tests/Makefile

@@ -1,4 +1,4 @@
-INVENTORY=$(PWD)/../inventory/sample/hosts.ini
+INVENTORY=$(PWD)/../inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
$(HOME)/.ssh/id_rsa:
mkdir -p $(HOME)/.ssh
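
In the Makefile, ${CI_JOB_NAME} and ${BUILD_NUMBER} are Make variable references, and GNU Make initializes its variables from the environment, so the CI-exported values flow straight into INVENTORY. A quick dry-run check; the create-gce target name is an assumption based on the make create-${CI_PLATFORM} call above, and the values are illustrative:

# -n prints recipes without executing them.
cd tests
CI_JOB_NAME="gce_ubuntu-canal-ha" BUILD_NUMBER="142" make -n create-gce
# Recipes using $(INVENTORY) now point at ../inventory/sample/gce_ubuntu-canal-ha-142.ini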

Jenkinsfile

@@ -1,9 +1,9 @@
def run(username, credentialsId, ami, network_plugin, aws_access, aws_secret) {
-def inventory_path = pwd() + "/inventory/sample/hosts.ini"
+def inventory_path = pwd() + "/inventory/sample/${env.CI_JOB_NAME}-${env.BUILD_NUMBER}.ini"
dir('tests') {
wrap([$class: 'AnsiColorBuildWrapper', colorMapName: "xterm"]) {
try {
-create_vm("${env.JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
+create_vm("${env.CI_JOB_NAME}-${env.BUILD_NUMBER}", inventory_path, ami, username, network_plugin, aws_access, aws_secret)
install_cluster(inventory_path, credentialsId, network_plugin)
test_apiserver(inventory_path, credentialsId)
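
The Jenkins-side change mirrors the GitLab one: env.JOB_NAME (a Jenkins built-in) is swapped for env.CI_JOB_NAME so the VM and inventory names line up across both CI systems. CI_JOB_NAME is not a Jenkins built-in, so this assumes the job configuration injects it. A sketch of the resolved path, with made-up values:

export CI_JOB_NAME="kubespray-aws"   # assumed to be injected by the job config
export BUILD_NUMBER="57"             # Jenkins built-in
echo "$(pwd)/inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini"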