Merge branch 'master' into master

commit 198adb8d37 (zoues, 2017-05-23 09:32:28 +08:00, committed by GitHub)
68 changed files with 507 additions and 332 deletions

.gitignore

@@ -7,6 +7,7 @@ temp
 .idea
 .tox
 .cache
+*.bak
 *.egg-info
 *.pyc
 *.pyo

@@ -18,7 +18,7 @@ variables:
 #   us-west1-a
 before_script:
-  - pip install ansible==2.2.1.0
+  - pip install ansible==2.3.0
   - pip install netaddr
   - pip install apache-libcloud==0.20.1
   - pip install boto==2.9.0
@@ -74,7 +74,7 @@ before_script:
   - $HOME/.cache
 before_script:
   - docker info
-  - pip install ansible==2.2.1.0
+  - pip install ansible==2.3.0
   - pip install netaddr
   - pip install apache-libcloud==0.20.1
   - pip install boto==2.9.0
@@ -137,7 +137,7 @@ before_script:
   if [ "${UPGRADE_TEST}" != "false" ]; then
   test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
   test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
-  pip install ansible==2.2.1.0;
+  pip install ansible==2.3.0;
   git checkout "${CI_BUILD_REF}";
   ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
   ${SSH_ARGS}
@@ -596,6 +596,7 @@ syntax-check:
   - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
   - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
   - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
+  - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
 except: ['triggers', 'master']
 tox-inventory-builder:

@@ -1,161 +0,0 @@
sudo: required
services:
- docker
git:
depth: 5
env:
global:
GCE_USER=travis
SSH_USER=$GCE_USER
TEST_ID=$TRAVIS_JOB_NUMBER
CONTAINER_ENGINE=docker
PRIVATE_KEY=$GCE_PRIVATE_KEY
GS_ACCESS_KEY_ID=$GS_KEY
GS_SECRET_ACCESS_KEY=$GS_SECRET
ANSIBLE_KEEP_REMOTE_FILES=1
CLUSTER_MODE=default
BOOTSTRAP_OS=none
matrix:
# Debian Jessie
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=asia-east1-a
CLUSTER_MODE=ha
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=debian-8-kubespray
CLOUD_REGION=europe-west1-c
CLUSTER_MODE=default
# Centos 7
- >-
KUBE_NETWORK_PLUGIN=flannel
CLOUD_IMAGE=centos-7
CLOUD_REGION=asia-northeast1-c
CLUSTER_MODE=default
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=centos-7
CLOUD_REGION=us-central1-b
CLUSTER_MODE=ha
# Redhat 7
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=rhel-7
CLOUD_REGION=us-east1-c
CLUSTER_MODE=default
# CoreOS stable
#- >-
# KUBE_NETWORK_PLUGIN=weave
# CLOUD_IMAGE=coreos-stable
# CLOUD_REGION=europe-west1-b
# CLUSTER_MODE=ha
# BOOTSTRAP_OS=coreos
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-west1-b
CLUSTER_MODE=default
BOOTSTRAP_OS=coreos
# Extra cases for separated roles
- >-
KUBE_NETWORK_PLUGIN=canal
CLOUD_IMAGE=rhel-7
CLOUD_REGION=asia-northeast1-b
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=weave
CLOUD_IMAGE=ubuntu-1604-xenial
CLOUD_REGION=europe-west1-d
CLUSTER_MODE=separate
- >-
KUBE_NETWORK_PLUGIN=calico
CLOUD_IMAGE=coreos-stable
CLOUD_REGION=us-central1-f
CLUSTER_MODE=separate
BOOTSTRAP_OS=coreos
matrix:
allow_failures:
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
before_install:
# Install Ansible.
- pip install --user ansible
- pip install --user netaddr
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
- pip install --user apache-libcloud==0.20.1
- pip install --user boto==2.9.0 -U
# Load cached docker images
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
cache:
- directories:
- $HOME/.cache/pip
- $HOME/.local
- /var/tmp/releases
before_script:
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
- chmod 400 $HOME/.ssh/id_rsa
- chmod 755 $HOME/.local/bin/ansible-playbook
- $HOME/.local/bin/ansible-playbook --version
- cp tests/ansible.cfg .
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
script:
- >
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}
# Create cluster with netchecker app deployed
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e bootstrap_os=${BOOTSTRAP_OS}
-e ansible_python_interpreter=${PYPATH}
-e download_run_once=true
-e download_localhost=true
-e local_release_dir=/var/tmp/releases
-e deploy_netchecker=true
cluster.yml
# Tests Cases
## Test Master API
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
## Ping the between 2 pod
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
## Advanced DNS checks
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
after_script:
- >
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
-e mode=${CLUSTER_MODE}
-e test_id=${TEST_ID}
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
-e gce_project_id=${GCE_PROJECT_ID}
-e gce_service_account_email=${GCE_ACCOUNT}
-e gce_pem_file=${HOME}/.ssh/gce
-e cloud_image=${CLOUD_IMAGE}
-e inventory_path=${PWD}/inventory/inventory.ini
-e cloud_region=${CLOUD_REGION}

@@ -50,13 +50,14 @@ Note: Upstart/SysV init based OS types are not supported.
 Versions of supported components
 --------------------------------
 [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.4 <br>
 [etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
 [flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
 [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
 [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
 [weave](http://weave.works/) v1.8.2 <br>
-[docker](https://www.docker.com/) v1.12.5 <br>
+[docker](https://www.docker.com/) v1.13.1 <br>
 [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
 Note: rkt support as docker alternative is limited to control plane (etcd and
@@ -67,9 +68,9 @@ plugins can be deployed for a given single cluster.
 Requirements
 --------------
-* **Ansible v2.2 (or newer) and python-netaddr is installed on the machine
+* **Ansible v2.3 (or newer) and python-netaddr is installed on the machine
   that will run Ansible commands**
-* **Jinja 2.8 (or newer) is required to run the Ansible Playbooks**
+* **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
 * The target servers must have **access to the Internet** in order to pull docker images.
 * The target servers are configured to allow **IPv4 forwarding**.
 * **Your ssh key must be copied** to all the servers part of your inventory.

@@ -14,20 +14,42 @@ This project will create:
 **How to Use:**
-- Export the variables for your AWS credentials or edit credentials.tfvars:
+- Export the variables for your AWS credentials or edit `credentials.tfvars`:
 ```
-export aws_access_key="xxx"
-export aws_secret_key="yyy"
-export aws_ssh_key_name="zzz"
+export AWS_ACCESS_KEY_ID="www"
+export AWS_SECRET_ACCESS_KEY="xxx"
+export AWS_SSH_KEY_NAME="yyy"
+export AWS_DEFAULT_REGION="zzz"
 ```
-- Update contrib/terraform/aws/terraform.tfvars with your data
+- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
+- Update `contrib/terraform/aws/terraform.tfvars` with your data
+- Allocate new AWS Elastic IPs, depending on the number of Availability Zones used (2 for each AZ)
+- Create an AWS EC2 SSH Key
-- Run with `terraform apply -var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
+- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials
+- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
 - Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag.
+
+**Troubleshooting**
+
+***Remaining AWS IAM Instance Profile***:
+If the cluster was destroyed without using Terraform it is possible that
+the AWS IAM Instance Profiles still remain. To delete them you can use
+the `AWS CLI` with the following command:
+```
+aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
+```
+
+***Ansible Inventory doesn't get created:***
+It can happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into that file.
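This commit also adds an `inventory` output to the Terraform configuration (see the outputs hunk below), so the rendered inventory can usually be regenerated from state as well; a minimal sketch, assuming the command is run from `contrib/terraform/aws` and that the relative path matches your checkout layout:

```
# Re-render the inventory template from Terraform state and place it where kargo expects it.
terraform output inventory > ../../../inventory/hosts
```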
 **Architecture**
 Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.

@@ -173,6 +173,7 @@ data "template_file" "inventory" {
     list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
     elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
     elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
+    kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}"
   }
 }

@@ -18,3 +18,7 @@ output "etcd" {
 output "aws_elb_api_fqdn" {
   value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
 }
+
+output "inventory" {
+  value = "${data.template_file.inventory.rendered}"
+}

@@ -25,3 +25,4 @@ kube-master
 [k8s-cluster:vars]
 ${elb_api_fqdn}
 ${elb_api_port}
+${kube_insecure_apiserver_address}

@@ -1,6 +1,5 @@
 #Global Vars
 aws_cluster_name = "devtest"
-aws_region = "eu-central-1"
 #VPC Vars
 aws_vpc_cidr_block = "10.250.192.0/18"
@@ -28,5 +27,6 @@ aws_cluster_ami = "ami-903df7ff"
 #Settings AWS ELB
-aws_elb_api_port = 443
-k8s_secure_api_port = 443
+aws_elb_api_port = 6443
+k8s_secure_api_port = 6443
+kube_insecure_apiserver_address = 0.0.0.0

@@ -95,3 +95,7 @@ variable "aws_elb_api_port" {
 variable "k8s_secure_api_port" {
   description = "Secure Port of K8S API Server"
 }
+
+variable "kube_insecure_apiserver_address" {
+  description = "Bind Address for insecure Port of K8s API Server"
+}

@@ -86,7 +86,7 @@ This will provision one VM as master using a floating ip, two additional masters
 Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
 ```
-# Flavour depends on your openstack installation, you can get available flavours through `nova list-flavors`
+# Flavour depends on your openstack installation, you can get available flavours through `nova flavor-list`
 flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
 # This is the name of an image already available in your openstack installation.
 image_gfs = "Ubuntu 15.10"

@@ -27,7 +27,7 @@ not _kube-node_.
 There are also two special groups:
-* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
+* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
 * **bastion** : configure a bastion host if your nodes are not directly reachable
 Below is a complete inventory example:

@@ -11,6 +11,10 @@ Or with Ansible:
 Before running the cluster playbook you must satisfy the following requirements:
-* On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space)
+General CoreOS Pre-Installation Notes:
+- You should set the bootstrap_os variable to `coreos`
+- Ensure that the bin_dir is set to `/opt/bin`
+- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
+- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
 Then you can proceed to [cluster deployment](#run-deployment)
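Taken together, the notes above amount to a handful of inventory variables; a minimal sketch of CoreOS group vars (the file location is illustrative, the variable names are the ones referenced above):

```
# e.g. inventory/group_vars/all.yml
bootstrap_os: coreos
bin_dir: /opt/bin
ansible_python_interpreter: /opt/bin/python  # laid down by the bootstrap tasks
resolvconf_mode: host_resolvconf             # docker_dns does not work on CoreOS
```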

@@ -38,7 +38,7 @@ Example inventory generator usage:
 ```
 cp -r inventory my_inventory
 declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
-CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
+CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS[@]}
 ```
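The `${IPS[@]}` form matters because `${IPS}` expands to only the first element of a bash array, while `${IPS[@]}` passes every listed IP to the builder. A typical next step, assuming the generated `my_inventory/inventory.cfg` is used as-is:

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v
```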
 Starting custom deployment

@@ -44,7 +44,15 @@ deployed.
 ```
 git fetch origin
 git checkout origin/master
-ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg
+ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg -e kube_version=v1.6.0
+```
+After a successful upgrade, the Server Version should be updated:
+```
+$ kubectl version
+Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
+Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
 ```
 #### Upgrade order

@@ -98,6 +98,20 @@ Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.
 loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
 true to let kubelet load kernel modules.
+
+##### Custom flags for Kube Components
+For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
+```
+kubelet_custom_flags:
+  - "--eviction-hard=memory.available<100Mi"
+  - "--eviction-soft-grace-period=memory.available=30s"
+  - "--eviction-soft=memory.available<300Mi"
+```
+The possible vars are:
+* *apiserver_custom_flags*
+* *controller_mgr_custom_flags*
+* *scheduler_custom_flags*
+* *kubelet_custom_flags*
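The other components accept the same list (or single string) format; a minimal sketch for the API server, where the flag shown is only an illustration of the format and not a recommended setting:

```
apiserver_custom_flags:
  - "--runtime-config=batch/v2alpha1=true"
```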
 #### User accounts
 Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their

extra_playbooks/inventory (symbolic link)

@@ -0,0 +1 @@
+../inventory

extra_playbooks/roles (symbolic link)

@@ -0,0 +1 @@
+../roles

@@ -0,0 +1,60 @@
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
### * Will not upgrade network plugins
### * Will not upgrade Docker
### * Currently does not support Vault deployment.
###
### In most cases, you probably want to use upgrade-cluster.yml playbook and
### not this one.
- hosts: localhost
gather_facts: False
roles:
- { role: kargo-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
vars:
# Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false
roles:
- { role: kargo-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: true
- hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kargo-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
#Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1
roles:
- { role: kargo-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master }
- { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:
- { role: kargo-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node }
- { role: upgrade/post-upgrade, tags: post-upgrade }
- { role: kargo-defaults}
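A plausible invocation of this new playbook, assuming the inventory lives at `inventory/inventory.cfg` and that the target version is pinned the same way the upgrade docs do:

```
# Upgrade only the Kubernetes components (no etcd, network-plugin or Docker upgrade)
ansible-playbook -i inventory/inventory.cfg -b extra_playbooks/upgrade-only-k8s.yml -e kube_version=v1.6.4
```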

@@ -98,7 +98,7 @@ cluster_name: cluster.local
 # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
 ndots: 2
 # Can be dnsmasq_kubedns, kubedns or none
-dns_mode: dnsmasq_kubedns
+dns_mode: kubedns
 # Can be docker_dns, host_resolvconf or none
 resolvconf_mode: docker_dns
 # Deploy netchecker app to verify DNS resolve as an HTTP service

@@ -1,14 +1,3 @@
-ansible==2.2.1.0
+ansible>=2.3.0
 netaddr
-# Ansible 2.2.1 requires jinja2<2.9, see <https://github.com/ansible/ansible/blob/v2.2.1.0-1/setup.py#L25>,
-# but without explicit limiting upper jinja2 version here pip ignores
-# Ansible requirements and installs latest available jinja2
-# (pip is not very smart here), which is incompatible with with
-# Ansible 2.2.1.
-# With incompatible jinja2 version "ansible-vault create" (and probably other parts)
-# fails with:
-# ERROR! Unexpected Exception: The 'jinja2<2.9' distribution was not found
-# and is required by ansible
-# This upper limit should be removed in 2.2.2 release, see:
-# <https://github.com/ansible/ansible/commit/978311bf3f91dae5806ab72b665b0937adce38ad>
-jinja2>=2.8,<2.9
+jinja2>=2.9.6

@@ -13,3 +13,6 @@
     line: "enabled=0"
     state: present
   when: fastestmirror.stat.exists
+
+- name: Install packages requirements for bootstrap
+  raw: yum -y install libselinux-python

@@ -41,7 +41,7 @@ spec:
   - /cluster-proportional-autoscaler
   - --namespace=kube-system
   - --configmap=dnsmasq-autoscaler
-  - --target=ReplicationController/dnsmasq
+  - --target=Deployment/dnsmasq
   # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
   # If using small nodes, "nodesPerReplica" should dominate.
   - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}

@@ -22,8 +22,8 @@ kube_version: v1.6.4
 etcd_version: v3.0.17
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v1.1.0-rc8"
-calico_cni_version: "v1.5.6"
+calico_version: "v1.1.3"
+calico_cni_version: "v1.7.0"
 calico_policy_version: "v0.5.4"
 weave_version: 1.8.2
 flannel_version: v0.6.2
@@ -50,10 +50,8 @@ calico_cni_image_repo: "calico/cni"
 calico_cni_image_tag: "{{ calico_cni_version }}"
 calico_policy_image_repo: "calico/kube-policy-controller"
 calico_policy_image_tag: "{{ calico_policy_version }}"
-# TODO(adidenko): switch to "calico/routereflector" when
-# https://github.com/projectcalico/calico-bird/pull/27 is merged
-calico_rr_image_repo: "quay.io/l23network/routereflector"
-calico_rr_image_tag: "v0.1"
+calico_rr_image_repo: "quay.io/calico/routereflector"
+calico_rr_image_tag: "v0.3.0"
 exechealthz_version: 1.1
 exechealthz_image_repo: "gcr.io/google_containers/exechealthz-amd64"
 exechealthz_image_tag: "{{ exechealthz_version }}"
@@ -61,9 +59,11 @@ hyperkube_image_repo: "quay.io/coreos/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}_coreos.0"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
-netcheck_tag: "v1.0"
+netcheck_version: "v1.0"
 netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
+netcheck_agent_tag: "{{ netcheck_version }}"
 netcheck_server_img_repo: "quay.io/l23network/k8s-netchecker-server"
+netcheck_server_tag: "{{ netcheck_version }}"
 weave_kube_image_repo: "weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
 weave_npc_image_repo: "weaveworks/weave-npc"
@@ -103,13 +103,13 @@ downloads:
   netcheck_server:
     container: true
     repo: "{{ netcheck_server_img_repo }}"
-    tag: "{{ netcheck_tag }}"
+    tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
     enabled: "{{ deploy_netchecker|bool }}"
   netcheck_agent:
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
-    tag: "{{ netcheck_tag }}"
+    tag: "{{ netcheck_agent_tag }}"
    sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
     enabled: "{{ deploy_netchecker|bool }}"
   etcd:

@@ -2,14 +2,18 @@
 - name: downloading...
   debug:
     msg: "{{ download.url }}"
-  when: "{{ download.enabled|bool and not download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - not download.container|bool

 - name: Create dest directories
   file:
     path: "{{local_release_dir}}/{{download.dest|dirname}}"
     state: directory
     recurse: yes
-  when: "{{ download.enabled|bool and not download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - not download.container|bool
   tags: bootstrap-os

 - name: Download items
@@ -23,7 +27,9 @@
   until: "'OK' in get_url_result.msg or 'file already exists' in get_url_result.msg"
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  when: "{{ download.enabled|bool and not download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - not download.container|bool

 - name: Extract archives
   unarchive:
@@ -32,7 +38,11 @@
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     copy: no
-  when: "{{ download.enabled|bool and not download.container|bool and download.unarchive is defined and download.unarchive == True }}"
+  when:
+    - download.enabled|bool
+    - not download.container|bool
+    - download.unarchive is defined
+    - download.unarchive == True

 - name: Fix permissions
   file:
@@ -40,7 +50,10 @@
     path: "{{local_release_dir}}/{{download.dest}}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
-  when: "{{ download.enabled|bool and not download.container|bool and (download.unarchive is not defined or download.unarchive == False) }}"
+  when:
+    - download.enabled|bool
+    - not download.container|bool
+    - (download.unarchive is not defined or download.unarchive == False)

 - set_fact:
     download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
@@ -53,13 +66,15 @@
     recurse: yes
     mode: 0755
     owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
-  when: "{{ download.enabled|bool and download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool
   tags: bootstrap-os

 # This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes
 - name: Hack python binary path for localhost
   raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python"
-  when: "{{ download_delegate == 'localhost' }}"
+  when: download_delegate == 'localhost'
   delegate_to: localhost
   failed_when: false
   run_once: true
@@ -73,12 +88,18 @@
   delegate_to: localhost
   become: false
   run_once: true
-  when: "{{ download_run_once|bool and download.enabled|bool and download.container|bool and download_delegate == 'localhost' }}"
+  when:
+    - download_run_once|bool
+    - download.enabled|bool
+    - download.container|bool
+    - download_delegate == 'localhost'
   tags: localhost

 - name: Make download decision if pull is required by tag or sha256
   include: set_docker_image_facts.yml
-  when: "{{ download.enabled|bool and download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool
   delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
   run_once: "{{ download_run_once|bool }}"
   tags: facts
@@ -86,7 +107,9 @@
 - name: pulling...
   debug:
     msg: "{{ pull_args }}"
-  when: "{{ download.enabled|bool and download.container|bool }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool

 #NOTE(bogdando) this brings no docker-py deps for nodes
 - name: Download containers if pull is required or told to always pull
@@ -95,7 +118,10 @@
   until: pull_task_result|succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  when: "{{ download.enabled|bool and download.container|bool and pull_required|bool|default(download_always_pull) }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool
+    - pull_required|bool|default(download_always_pull)
   delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
   run_once: "{{ download_run_once|bool }}"
@@ -110,7 +136,10 @@
 - name: "Update the 'container_changed' fact"
   set_fact:
     container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}"
-  when: "{{ download.enabled|bool and download.container|bool and pull_required|bool|default(download_always_pull) }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool
+    - pull_required|bool|default(download_always_pull)
   delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
   run_once: "{{ download_run_once|bool }}"
   tags: facts
@@ -120,7 +149,10 @@
     path: "{{fname}}"
   register: img
   changed_when: false
-  when: "{{ download.enabled|bool and download.container|bool and download_run_once|bool }}"
+  when:
+    - download.enabled|bool
+    - download.container|bool
+    - download_run_once|bool
   delegate_to: "{{ download_delegate }}"
   become: false
   run_once: true
@@ -131,7 +163,12 @@
   delegate_to: "{{ download_delegate }}"
   register: saved
   run_once: true
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool and (container_changed|bool or not img.stat.exists)
+  when:
+    - (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
+    - download_run_once|bool
+    - download.enabled|bool
+    - download.container|bool
+    - (container_changed|bool or not img.stat.exists)

 - name: Download | copy container images to ansible host
   synchronize:
@@ -140,7 +177,14 @@
     mode: pull
   delegate_to: localhost
   become: false
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname == groups['kube-master'][0] and download_delegate != "localhost" and download_run_once|bool and download.enabled|bool and download.container|bool and saved.changed
+  when:
+    - not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+    - inventory_hostname == groups['kube-master'][0]
+    - download_delegate != "localhost"
+    - download_run_once|bool
+    - download.enabled|bool
+    - download.container|bool
+    - saved.changed

 - name: Download | upload container images to nodes
   synchronize:
@@ -153,10 +197,21 @@
   until: get_task|succeeded
   retries: 4
   delay: "{{ retry_stagger | random + 3 }}"
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
+  when:
+    - (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
+       inventory_hostname != groups['kube-master'][0] or
+       download_delegate == "localhost")
+    - download_run_once|bool
+    - download.enabled|bool
+    - download.container|bool
   tags: [upload, upgrade]

 - name: Download | load container images
   shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost") and download_run_once|bool and download.enabled|bool and download.container|bool
+  when:
+    - (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
+       inventory_hostname != groups['kube-master'][0] or download_delegate == "localhost")
+    - download_run_once|bool
+    - download.enabled|bool
+    - download.container|bool
   tags: [upload, upgrade]

@@ -1,4 +1,12 @@
 ---
+- include: sync_etcd_master_certs.yml
+  when: inventory_hostname in groups.etcd
+  tags: etcd-secrets
+
+- include: sync_etcd_node_certs.yml
+  when: inventory_hostname in etcd_node_cert_hosts
+  tags: etcd-secrets
+
 - name: gen_certs_vault | Read in the local credentials
   command: cat /etc/vault/roles/etcd/userpass
@@ -75,3 +83,5 @@
   with_items: "{{ etcd_node_certs_needed|d([]) }}"
   when: inventory_hostname in etcd_node_cert_hosts
   notify: set etcd_secret_changed

@@ -7,20 +7,7 @@
   when: cert_management == "script"
   tags: [etcd-secrets, facts]

-- include: gen_certs_script.yml
-  when: cert_management == "script"
-  tags: etcd-secrets
-
-- include: sync_etcd_master_certs.yml
-  when: cert_management == "vault" and inventory_hostname in groups.etcd
-  tags: etcd-secrets
-
-- include: sync_etcd_node_certs.yml
-  when: cert_management == "vault" and inventory_hostname in etcd_node_cert_hosts
-  tags: etcd-secrets
-
-- include: gen_certs_vault.yml
-  when: cert_management == "vault" and (etcd_master_certs_needed|d() or etcd_node_certs_needed|d())
+- include: "gen_certs_{{ cert_management }}.yml"
   tags: etcd-secrets

 - include: "install_{{ etcd_deployment_type }}.yml"

@@ -42,9 +42,6 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 system_namespace: kube-system

-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"

@@ -17,7 +17,7 @@
 - set_fact:
     wait_for_delegate: "{{hostvars['bastion']['ansible_ssh_host']}}"
-  when: "{{ 'bastion' in groups['all'] }}"
+  when: "'bastion' in groups['all']"

 - name: wait for bastion to come back
   wait_for:
@@ -27,7 +27,7 @@
     timeout: 300
   become: false
   delegate_to: localhost
-  when: "is_bastion"
+  when: is_bastion

 - name: waiting for server to come back (using bastion if necessary)
   wait_for:
@@ -37,4 +37,4 @@
     timeout: 300
   become: false
   delegate_to: "{{ wait_for_delegate }}"
-  when: "not is_bastion"
+  when: not is_bastion

@@ -24,8 +24,8 @@ deploy_netchecker: false
 netchecker_port: 31081
 agent_report_interval: 15
 netcheck_namespace: default
-agent_img: "{{ netcheck_agent_img_repo }}:{{ netcheck_tag }}"
-server_img: "{{ netcheck_server_img_repo }}:{{ netcheck_tag }}"
+agent_img: "{{ netcheck_agent_img_repo }}:{{ netcheck_agent_tag }}"
+server_img: "{{ netcheck_server_img_repo }}:{{ netcheck_server_tag }}"

 # Limits for netchecker apps
 netchecker_agent_cpu_limit: 30m

@@ -5,7 +5,7 @@
   with_items:
     - {file: netchecker-agent-ds.yml.j2, type: ds, name: netchecker-agent}
     - {file: netchecker-agent-hostnet-ds.yml.j2, type: ds, name: netchecker-agent-hostnet}
-    - {file: netchecker-server-pod.yml.j2, type: po, name: netchecker-server}
+    - {file: netchecker-server-deployment.yml.j2, type: po, name: netchecker-server}
     - {file: netchecker-server-svc.yml.j2, type: svc, name: netchecker-service}
   register: manifests
   when: inventory_hostname == groups['kube-master'][0]

@@ -42,7 +42,7 @@ spec:
   - --namespace=kube-system
   - --configmap=kubedns-autoscaler
   # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
-  - --target=replicationcontroller/kubedns
+  - --target=Deployment/kubedns
   - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
   - --logtostderr=true
   - --v=2

@@ -83,6 +83,7 @@ spec:
 {% if kube_log_level == '4' %}
   - --log-queries
 {% endif %}
+  - --local=/{{ bogus_domains }}
   ports:
     - containerPort: 53
       name: dns

@@ -20,6 +20,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+        - name: MY_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
         args:
           - "-v=5"
           - "-alsologtostderr=true"

@@ -24,6 +24,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+        - name: MY_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
         args:
           - "-v=5"
           - "-alsologtostderr=true"

@@ -24,6 +24,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
+        - name: MY_NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
         args:
           - "-v=5"
           - "-alsologtostderr=true"

@@ -0,0 +1,33 @@
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: netchecker-server
spec:
replicas: 1
template:
metadata:
name: netchecker-server
labels:
app: netchecker-server
namespace: {{ netcheck_namespace }}
spec:
containers:
- name: netchecker-server
image: "{{ server_img }}"
env:
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
ports:
- containerPort: 8081
hostPort: 8081
args:
- "-v=5"
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"

@@ -1,28 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: netchecker-server
labels:
app: netchecker-server
namespace: {{ netcheck_namespace }}
spec:
containers:
- name: netchecker-server
image: "{{ server_img }}"
env:
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ netchecker_server_cpu_limit }}
memory: {{ netchecker_server_memory_limit }}
requests:
cpu: {{ netchecker_server_cpu_requests }}
memory: {{ netchecker_server_memory_requests }}
ports:
- containerPort: 8081
hostPort: 8081
args:
- "-v=5"
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"

@@ -1 +1,4 @@
 helm_enabled: false
+
+# specify a dir and attach it to helm for HELM_HOME.
+helm_home_dir: "/root/.helm"

@@ -1,4 +1,7 @@
 ---
+- name: Helm | Make sure HELM_HOME directory exists
+  file: path={{ helm_home_dir }} state=directory
+
 - name: Helm | Set up helm launcher
   template:
     src: helm-container.j2
@@ -8,7 +11,7 @@
   register: helm_container

 - name: Helm | Install/upgrade helm
-  command: "helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
+  command: "{{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }}"
   when: helm_container.changed

 - name: Helm | Set up bash completion

@@ -3,6 +3,7 @@
   --net=host \
   --name=helm \
   -v /etc/ssl:/etc/ssl:ro \
+  -v {{ helm_home_dir }}:{{ helm_home_dir }}:rw \
   {% for dir in ssl_ca_dirs -%}
   -v {{ dir }}:{{ dir }}:ro \
   {% endfor -%}

@@ -36,6 +36,13 @@ kube_apiserver_cpu_limit: 800m
 kube_apiserver_memory_requests: 256M
 kube_apiserver_cpu_requests: 100m
+# Admission control plug-ins
+kube_apiserver_admission_control:
+  - NamespaceLifecycle
+  - LimitRanger
+  - ServiceAccount
+  - DefaultStorageClass
+  - ResourceQuota
 ## Enable/Disable Kube API Server Authentication Methods
 kube_basic_auth: true
@@ -51,3 +58,10 @@ kube_oidc_auth: false
 # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem
 # kube_oidc_username_claim: sub
 # kube_oidc_groups_claim: groups
+
+##Variables for custom flags
+apiserver_custom_flags: []
+controller_mgr_custom_flags: []
+scheduler_custom_flags: []

@@ -34,9 +34,9 @@
 - meta: flush_handlers

-- name: copy kube system namespace manifest
-  copy:
-    src: namespace.yml
+- name: Write kube system namespace manifest
+  template:
+    src: namespace.j2
     dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]

@@ -9,7 +9,7 @@ metadata:
 spec:
   hostNetwork: true
 {% if kube_version | version_compare('v1.6', '>=') %}
-  dnsPolicy: ClusterFirstWithHostNet
+  dnsPolicy: ClusterFirst
 {% endif %}
   containers:
   - name: kube-apiserver
@@ -33,7 +33,7 @@ spec:
     - --etcd-keyfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
     - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
    - --apiserver-count={{ kube_apiserver_count }}
-    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
+    - --admission-control={{ kube_apiserver_admission_control | join(',') }}
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --service-node-port-range={{ kube_apiserver_node_port_range }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
@@ -80,6 +80,13 @@ spec:
 {% endif %}
 {% if kube_api_anonymous_auth is defined and kube_version | version_compare('v1.5', '>=') %}
     - --anonymous-auth={{ kube_api_anonymous_auth }}
+{% endif %}
+{% if apiserver_custom_flags is string %}
+    - {{ apiserver_custom_flags }}
+{% else %}
+{% for flag in apiserver_custom_flags %}
+    - {{ flag }}
+{% endfor %}
 {% endif %}
     livenessProbe:
       httpGet:

@@ -8,7 +8,7 @@ metadata:
 spec:
   hostNetwork: true
 {% if kube_version | version_compare('v1.6', '>=') %}
-  dnsPolicy: ClusterFirstWithHostNet
+  dnsPolicy: ClusterFirst
 {% endif %}
   containers:
   - name: kube-controller-manager
@@ -45,6 +45,13 @@ spec:
     - --allocate-node-cidrs=true
     - --configure-cloud-routes=true
     - --cluster-cidr={{ kube_pods_subnet }}
+{% endif %}
+{% if controller_mgr_custom_flags is string %}
+    - {{ controller_mgr_custom_flags }}
+{% else %}
+{% for flag in controller_mgr_custom_flags %}
+    - {{ flag }}
+{% endfor %}
 {% endif %}
     livenessProbe:
       httpGet:

@@ -8,7 +8,7 @@ metadata:
 spec:
   hostNetwork: true
 {% if kube_version | version_compare('v1.6', '>=') %}
-  dnsPolicy: ClusterFirstWithHostNet
+  dnsPolicy: ClusterFirst
 {% endif %}
   containers:
   - name: kube-scheduler
@@ -27,6 +27,13 @@ spec:
     - --leader-elect=true
     - --master={{ kube_apiserver_endpoint }}
     - --v={{ kube_log_level }}
+{% if scheduler_custom_flags is string %}
+    - {{ scheduler_custom_flags }}
+{% else %}
+{% for flag in scheduler_custom_flags %}
+    - {{ flag }}
+{% endfor %}
+{% endif %}
     livenessProbe:
       httpGet:
         host: 127.0.0.1

@@ -1,3 +1,6 @@
+# Valid options: docker (default), rkt, or host
+kubelet_deployment_type: docker
+
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
@@ -45,3 +48,6 @@ etcd_config_dir: /etc/ssl/etcd
 kube_apiserver_node_port_range: "30000-32767"
 kubelet_load_modules: false
+
+##Support custom flags to be passed to kubelet
+kubelet_custom_flags: []

@@ -0,0 +1,10 @@
---
- name: install | Copy kubelet from hyperkube container
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -f /hyperkube /systembindir/kubelet"
register: kubelet_task_result
until: kubelet_task_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
tags: [hyperkube, upgrade]
notify: restart kubelet

@@ -7,6 +7,12 @@
 - include: pre_upgrade.yml
   tags: kubelet

+- name: Ensure /var/lib/cni exists
+  file:
+    path: /var/lib/cni
+    state: directory
+    mode: 0755
+
 - include: install.yml
   tags: kubelet

@@ -25,6 +25,7 @@
   -v /var/lib/cni:/var/lib/cni:shared \
   -v /var/run:/var/run:rw \
   -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
+  -v /etc/os-release:/etc/os-release:ro \
   {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
   ./hyperkube kubelet \
   "$@"

@@ -23,10 +23,11 @@ ExecStart={{ bin_dir }}/kubelet \
   $DOCKER_SOCKET \
   $KUBELET_NETWORK_PLUGIN \
   $KUBELET_CLOUDPROVIDER
+ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
+ExecReload={{ docker_bin_dir }}/docker restart kubelet
 Restart=always
 RestartSec=10s
-ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
-ExecReload={{ docker_bin_dir }}/docker restart kubelet

 [Install]
 WantedBy=multi-user.target

@@ -0,0 +1,30 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if kube_network_plugin is defined and kube_network_plugin == "calico" %}
After=docker.service docker.socket calico-node.service
Wants=docker.socket calico-node.service
{% else %}
After=docker.service
Wants=docker.socket
{% endif %}
[Service]
EnvironmentFile={{kube_config_dir}}/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_ARGS \
$DOCKER_SOCKET \
$KUBELET_NETWORK_PLUGIN \
$KUBELET_CLOUDPROVIDER
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target

@@ -19,13 +19,13 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
 {# DNS settings for kubelet #}
 {% if dns_mode == 'kubedns' %}
-{% set kubelet_args_cluster_dns %}--cluster_dns={{ skydns_server }}{% endset %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
 {% elif dns_mode == 'dnsmasq_kubedns' %}
-{% set kubelet_args_cluster_dns %}--cluster_dns={{ dns_server }}{% endset %}
+{% set kubelet_args_cluster_dns %}--cluster-dns={{ dns_server }}{% endset %}
 {% else %}
 {% set kubelet_args_cluster_dns %}{% endset %}
 {% endif %}
-{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
+{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}

 {# Location of the apiserver #}
 {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
@@ -44,7 +44,7 @@ KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
 {% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
 {% endif %}
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}


@ -20,6 +20,7 @@ ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
--volume os-release,kind=host,source=/etc/os-release,readOnly=true \
--volume dns,kind=host,source=/etc/resolv.conf \
--volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
--volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
@ -39,6 +40,7 @@ ExecStart=/usr/bin/rkt run \
--mount volume=opt-cni,target=/opt/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
{% endif %}
--mount volume=os-release,target=/etc/os-release \
--mount volume=dns,target=/etc/resolv.conf \
--mount volume=etc-kubernetes,target={{ kube_config_dir }} \
--mount volume=etc-ssl-certs,target=/etc/ssl/certs \


@ -8,7 +8,7 @@ metadata:
spec:
hostNetwork: true
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
dnsPolicy: ClusterFirst
{% endif %}
containers:
- name: kube-proxy
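To see which policy a running kube-proxy pod actually received after this template change, the pod spec can be queried; the k8s-app=kube-proxy label used below is an assumption:

  kubectl -n kube-system get pods -l k8s-app=kube-proxy -o jsonpath='{.items[*].spec.dnsPolicy}'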


@ -45,5 +45,5 @@
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
- name: Preinstall | restart kube-controller-manager
shell: "docker ps -f name=k8s-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
shell: "docker ps -f name=k8s_kube-controller-manager* -q | xargs --no-run-if-empty docker rm -f"
when: inventory_hostname in groups['kube-master'] and dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and kube_controller_set.stat.exists
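The updated filter matches the names the kubelet gives Docker containers, which follow roughly k8s_<container>_<pod>_<namespace>_<uid>_<attempt>, so k8s_kube-controller-manager catches the controller-manager container while the old k8s-controller-manager pattern no longer does. One way to check the match on a master node (illustrative):

  docker ps --filter name=k8s_kube-controller-manager --format '{{.Names}}'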


@ -17,7 +17,10 @@
line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}"
state: present
backup: yes
when: loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and apiserver_loadbalancer_domain_name is defined
when:
- loadbalancer_apiserver is defined
- loadbalancer_apiserver.address is defined
- apiserver_loadbalancer_domain_name is defined
- name: Hosts | localhost ipv4 in hosts file
lineinfile:
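Many conditionals in this change are rewritten from a single and-joined expression into the list form, where Ansible ANDs the entries; a minimal sketch of the equivalence (the task and conditions are illustrative):

  - name: example of list-form when
    debug:
      msg: "runs only when every condition holds"
    when:
      - ansible_os_family == "Debian"
      - inventory_hostname in groups['k8s-cluster']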


@ -43,7 +43,7 @@
path: "{{ kube_config_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: inventory_hostname in groups['k8s-cluster']
tags: [kubelet, k8s-secrets, kube-controller-manager, kube-apiserver, bootstrap-os, apps, network, master, node]
- name: Create kubernetes script directory
@ -51,7 +51,7 @@
path: "{{ kube_script_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: "inventory_hostname in groups['k8s-cluster']"
tags: [k8s-secrets, bootstrap-os]
- name: Create kubernetes manifests directory
@ -59,17 +59,21 @@
path: "{{ kube_manifest_dir }}"
state: directory
owner: kube
when: "{{ inventory_hostname in groups['k8s-cluster'] }}"
when: "inventory_hostname in groups['k8s-cluster']"
tags: [kubelet, bootstrap-os, master, node]
- name: check cloud_provider value
fail:
msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack' or 'vsphere'"
when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
when:
- cloud_provider is defined
- cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
tags: [cloud-provider, facts]
- include: "{{ cloud_provider }}-credential-check.yml"
when: cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
when:
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags: [cloud-provider, facts]
- name: Create cni directories
@ -80,7 +84,9 @@
with_items:
- "/etc/cni/net.d"
- "/opt/cni/bin"
when: kube_network_plugin in ["calico", "weave", "canal"] and "{{ inventory_hostname in groups['k8s-cluster'] }}"
when:
- kube_network_plugin in ["calico", "weave", "canal"]
- inventory_hostname in groups['k8s-cluster']
tags: [network, calico, weave, canal, bootstrap-os]
- name: Update package management cache (YUM)
@ -91,7 +97,9 @@
until: yum_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_pkg_mgr == 'yum' and not is_atomic
when:
- ansible_pkg_mgr == 'yum'
- not is_atomic
tags: bootstrap-os
- name: Install latest version of python-apt for Debian distribs
@ -109,14 +117,17 @@
until: dnf_task_result|succeeded
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: ansible_distribution == "Fedora" and
ansible_distribution_major_version > 21
when:
- ansible_distribution == "Fedora"
- ansible_distribution_major_version > 21
changed_when: False
tags: bootstrap-os
- name: Install epel-release on RedHat/CentOS
shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
when:
- ansible_distribution in ["CentOS","RedHat"]
- not is_atomic
register: epel_task_result
until: epel_task_result|succeeded
retries: 4
@ -149,7 +160,9 @@
selinux:
policy: targeted
state: permissive
when: ansible_os_family == "RedHat" and slc.stat.exists == True
when:
- ansible_os_family == "RedHat"
- slc.stat.exists == True
changed_when: False
tags: bootstrap-os
@ -159,7 +172,9 @@
line: "precedence ::ffff:0:0/96 100"
state: present
backup: yes
when: disable_ipv6_dns and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- disable_ipv6_dns
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: bootstrap-os
- name: set default sysctl file path
@ -176,7 +191,9 @@
- name: Change sysctl file path to link source if linked
set_fact:
sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
when: sysctl_file_stat.stat.islnk is defined and sysctl_file_stat.stat.islnk
when:
- sysctl_file_stat.stat.islnk is defined
- sysctl_file_stat.stat.islnk
tags: bootstrap-os
- name: Enable ip forwarding
@ -193,22 +210,33 @@
dest: "{{ kube_config_dir }}/cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
when:
- inventory_hostname in groups['k8s-cluster']
- cloud_provider is defined
- cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
tags: [cloud-provider]
- include: etchosts.yml
tags: [bootstrap-os, etchosts]
- include: resolvconf.yml
when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
tags: [bootstrap-os, resolvconf]
- include: dhclient-hooks.yml
when: dns_mode != 'none' and resolvconf_mode == 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- dns_mode != 'none'
- resolvconf_mode == 'host_resolvconf'
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: [bootstrap-os, resolvconf]
- include: dhclient-hooks-undo.yml
when: dns_mode != 'none' and resolvconf_mode != 'host_resolvconf' and not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
when:
- dns_mode != 'none'
- resolvconf_mode != 'host_resolvconf'
- not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
tags: [bootstrap-os, resolvconf]
- name: Check if we are running inside a Azure VM
@ -218,7 +246,7 @@
tags: bootstrap-os
- include: growpart-azure-centos-7.yml
when: azure_check.stat.exists and
ansible_distribution in ["CentOS","RedHat"]
when:
- azure_check.stat.exists
- ansible_distribution in ["CentOS","RedHat"]
tags: bootstrap-os


@ -16,7 +16,13 @@
{{dns_domain}}.{{d}}./{{d}}.{{d}}./com.{{d}}./
{%- endfor %}
default_resolver: >-
{%- if cloud_provider is defined and cloud_provider == 'gce' -%}169.254.169.254{%- else -%}8.8.8.8{%- endif -%}
{%- if cloud_provider is defined and cloud_provider == 'gce' -%}
169.254.169.254
{%- elif cloud_provider is defined and cloud_provider == 'aws' -%}
169.254.169.253
{%- else -%}
8.8.8.8
{%- endif -%}
- name: check if kubelet is configured
stat:


@ -85,7 +85,7 @@ if [ -n "$MASTERS" ]; then
cn="${host%%.*}"
# admin key
openssl genrsa -out admin-${host}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}" > /dev/null 2>&1
openssl req -new -key admin-${host}-key.pem -out admin-${host}.csr -subj "/CN=kube-admin-${cn}/O=system:masters" > /dev/null 2>&1
openssl x509 -req -in admin-${host}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out admin-${host}.pem -days 3650 > /dev/null 2>&1
done
fi
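Adding /O=system:masters places the admin certificate in the system:masters group, which RBAC binds to cluster-admin. The subject of a regenerated cert can be checked like this (the filename is illustrative):

  openssl x509 -noout -subject -in admin-node1.pem
  # expected to contain /CN=kube-admin-node1/O=system:masters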


@ -1,4 +1,11 @@
---
- include: sync_kube_master_certs.yml
when: inventory_hostname in groups['kube-master']
tags: k8s-secrets
- include: sync_kube_node_certs.yml
when: inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets
- name: gen_certs_vault | Read in the local credentials
command: cat /etc/vault/roles/kube/userpass


@ -74,13 +74,5 @@
- include: "gen_certs_{{ cert_management }}.yml"
tags: k8s-secrets
- include: sync_kube_master_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['kube-master']
tags: k8s-secrets
- include: sync_kube_node_certs.yml
when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets
- include: gen_tokens.yml
tags: k8s-secrets


@ -38,7 +38,7 @@
set_fact:
kube_api_certs_needed: "{{ item.path }}"
with_items: "{{ sync_file_results|d([]) }}"
when: "{{ item.no_srcs }}"
when: item.no_srcs
- name: sync_kube_master_certs | Unset sync_file_results after apiserver cert
set_fact:


@ -56,7 +56,7 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
when: "{{ overwrite_hyperkube_cni|bool }}"
when: overwrite_hyperkube_cni|bool
tags: [hyperkube, upgrade]
- name: Calico | Set cni directory permissions


@ -0,0 +1,2 @@
---
flush_iptables: true
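Because flush_iptables is now a role default, the iptables flush during reset can be disabled per run; a sketch, with an assumed inventory path:

  ansible-playbook -i inventory/inventory.ini reset.yml -e flush_iptables=false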


@ -8,6 +8,7 @@
- kubelet
- etcd
failed_when: false
tags: ['services']
- name: reset | remove services
file:
@ -17,6 +18,7 @@
- kubelet
- etcd
register: services_removed
tags: ['services']
- name: reset | remove docker dropins
file:
@ -26,6 +28,7 @@
- docker-dns.conf
- docker-options.conf
register: docker_dropins_removed
tags: ['docker']
- name: reset | systemctl daemon-reload
command: systemctl daemon-reload
@ -33,25 +36,31 @@
- name: reset | remove all containers
shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv"
tags: ['docker']
- name: reset | restart docker if needed
service:
name: docker
state: restarted
when: docker_dropins_removed.changed
tags: ['docker']
- name: reset | gather mounted kubelet dirs
shell: mount | grep /var/lib/kubelet | awk '{print $3}' | tac
check_mode: no
register: mounted_dirs
tags: ['mounts']
- name: reset | unmount kubelet dirs
command: umount {{item}}
with_items: '{{ mounted_dirs.stdout_lines }}'
tags: ['mounts']
- name: flush iptables
iptables:
flush: yes
when: flush_iptables|bool
tags: ['iptables']
- name: reset | delete some files and directories
file:
@ -74,6 +83,8 @@
- /etc/dhcp/dhclient.d/zdnsupdate.sh
- /etc/dhcp/dhclient-exit-hooks.d/zdnsupdate
- "{{ bin_dir }}/kubelet"
tags: ['files']
- name: reset | remove dns settings from dhclient.conf
blockinfile:
@ -85,6 +96,7 @@
with_items:
- /etc/dhclient.conf
- /etc/dhcp/dhclient.conf
tags: ['files', 'dns']
- name: reset | remove host entries from /etc/hosts
blockinfile:
@ -92,6 +104,7 @@
state: absent
follow: yes
marker: "# Ansible inventory hosts {mark}"
tags: ['files', 'dns']
- name: reset | Restart network
service:
@ -103,3 +116,4 @@
{%- endif %}
state: restarted
when: ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"]
tags: ['services', 'network']
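With these tags in place, a reset run can be limited to a subset of the cleanup steps; the inventory path and tag selection below are illustrative:

  ansible-playbook -i inventory/inventory.ini reset.yml --tags docker,files
  ansible-playbook -i inventory/inventory.ini reset.yml --skip-tags network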


@ -3,7 +3,7 @@
- name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running
shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi
register: vault_temp_stop_check
changed_when: "{{ 'true' in vault_temp_stop_check.stdout }}"
changed_when: "'true' in vault_temp_stop_check.stdout"
- name: bootstrap/start_vault_temp | Start single node Vault with file backend
command: >
@ -13,6 +13,10 @@
-v /etc/vault:/etc/vault
{{ vault_image_repo }}:{{ vault_version }} server
#FIXME(mattymo): Crashes on first start with aufs docker storage. See hashicorp/docker-vault#19
- name: bootstrap/start_vault_temp | Start again single node Vault with file backend
command: docker start {{ vault_temp_container_name }}
- name: bootstrap/start_vault_temp | Initialize vault-temp
uri:
url: "http://localhost:{{ vault_port }}/v1/sys/init"