e70bc92bb0
Squashed commits:

- [f9355ea] Swap the order in which we reload docker/socket (see the handler-ordering sketch below).
- [2ca6819] Reload docker.socket after installing flannel on CoreOS; workaround for #569.
- [9f976e5] Vagrantfile: set up a proxy inside the virtual machines. In corporate networks it is useful to pre-configure proxy variables.
- [9d7142f] Vagrantfile: use Ubuntu 16.04 LTS, a recent supported version of Ubuntu, for the local development setup with Vagrant.
- [50f77cc] Add CI test layouts: drop Wily from the test matrix and replace the dropped Wily cases with extra cases that test separate-roles deployment. Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
- [03e162b] Update OWNERS.
- [c7b00ca] Use tar+register instead of copy/slurp for distributing tokens and certs (related bug: https://github.com/ansible/ansible/issues/15405). Tar plus register is used because the synchronize module cannot sudo correctly on the remote side and copy is too slow; this dramatically cuts down the number of tasks needed for cert synchronization (sketched below).
- [2778ac6] Add a new var, skip_dnsmasq_k8s. If skip_dnsmasq is set, it will still not set up the dnsmasq k8s pod; this enables independent setup of the resolvconf section before kubelet is up (sketched below).
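The first two commits concern handler ordering on CoreOS: after flannel drops its Docker configuration, docker.socket has to be handled before the docker service itself. Below is a minimal sketch of that pattern; the play, handler names, template and drop-in path are illustrative assumptions, not the repository's actual ones.

# Illustrative only: handler names, template and drop-in path are assumptions.
- hosts: k8s-cluster
  become: true
  tasks:
    - name: Flannel | Write Docker drop-in with flannel options
      template:
        src: flannel-options.conf.j2        # hypothetical template name
        dest: /etc/systemd/system/docker.service.d/flannel-options.conf
      notify:
        - Restart docker.socket
        - Restart docker

  # Ansible runs notified handlers in the order they are defined here, so the
  # relative position of these two entries is what the "swap order" commit adjusts.
  handlers:
    - name: Restart docker.socket
      service:
        name: docker.socket
        state: restarted

    - name: Restart docker
      service:
        name: docker
        state: restarted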
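The tar+register commit packs the whole cert directory into a single base64 blob on the first master, registers it, and unpacks it on the other hosts, instead of slurping and copying every file individually; one registered variable replaces dozens of per-file tasks. A rough sketch of the idea, with assumed paths and group names (the real role is more involved):

# Illustrative only: paths, group names and variable names are assumptions.
- hosts: kube-master
  become: true
  tasks:
    - name: Pack certs into a single base64 blob on the first master
      shell: "tar czf - -C /etc/kubernetes/ssl . | base64 --wrap=0"
      register: certs_blob
      run_once: true     # runs on one host; the registered result is shared with all hosts in the play
      no_log: true       # keep key material out of the logs

    - name: Unpack the blob on the remaining masters
      shell: "echo '{{ certs_blob.stdout }}' | base64 -d | tar xzf - -C /etc/kubernetes/ssl/"
      when: inventory_hostname != groups['kube-master'][0]
      no_log: true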
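For skip_dnsmasq_k8s, the variable acts as a finer-grained switch next to skip_dnsmasq, so the resolvconf part of the role can run while the dnsmasq k8s pod is skipped. A minimal sketch of how such a default and task guard could fit together; the file layout, template name and manifest path are assumptions, not the exact role contents:

# roles/dnsmasq/defaults/main.yml (sketch)
skip_dnsmasq: false
# Follows skip_dnsmasq unless overridden explicitly.
skip_dnsmasq_k8s: "{{ skip_dnsmasq }}"

# roles/dnsmasq/tasks/main.yml (sketch)
- name: Write the dnsmasq static pod manifest
  template:
    src: dnsmasq-pod.yml.j2                          # hypothetical template name
    dest: /etc/kubernetes/manifests/dnsmasq-pod.yml
  when: not skip_dnsmasq_k8s|bool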
163 lines · 5.6 KiB · YAML
sudo: false

git:
  depth: 5

env:
  global:
    GCE_USER=travis
    SSH_USER=$GCE_USER
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
    ANSIBLE_KEEP_REMOTE_FILES=1
    CLUSTER_MODE=default
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=default

    # Centos 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default

    # Redhat 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=default

    # Ubuntu 16.04
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=asia-east1-c
      CLUSTER_MODE=default

    # Extra cases for separated roles
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-a
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d
      CLUSTER_MODE=separate

before_install:
  # Install Ansible.
  - pip install --user boto -U
  - pip install --user ansible
  - pip install --user netaddr
  # W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
  - pip install --user apache-libcloud==0.20.1

cache:
  - directories:
    - $HOME/.cache/pip
    - $HOME/.local

before_script:
  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
  - chmod 400 $HOME/.ssh/id_rsa
  - chmod 755 $HOME/.local/bin/ansible-playbook
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
  ## Configure ansible deployment logs to be collected as an artifact. Enable when GCS is configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/configure-logs.yaml

script:
  - >
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
    -e mode=${CLUSTER_MODE}
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}

  # Create cluster
  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"

  # Test cases
  ## Test master API
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL

  ## Create a pod
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL

  ## Ping between 2 pods
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL

  ## Collect env info, enable once GCS is configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scripts/collect-info.yaml

after_script:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}