commit 0f77b76ef1
222 changed files with 2175 additions and 1648 deletions

.gitignore (vendored, 1 line changed)

@@ -24,6 +24,7 @@ __pycache__/
 .Python
 env/
 build/
+credentials/
 develop-eggs/
 dist/
 downloads/

@@ -18,10 +18,7 @@ variables:
   # us-west1-a

 before_script:
-  - pip install ansible==2.3.0
-  - pip install netaddr
-  - pip install apache-libcloud==0.20.1
-  - pip install boto==2.9.0
+  - pip install -r tests/requirements.txt
   - mkdir -p /.ssh
   - cp tests/ansible.cfg .

@@ -75,10 +72,7 @@ before_script:
     - $HOME/.cache
 before_script:
   - docker info
-  - pip install ansible==2.3.0
-  - pip install netaddr
-  - pip install apache-libcloud==0.20.1
-  - pip install boto==2.9.0
+  - pip install -r tests/requirements.txt
  - mkdir -p /.ssh
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
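Both before_script blocks now pull the pinned Python dependencies from a single requirements file instead of four separate pip calls. The contents of tests/requirements.txt are not shown in this commit; assuming it carries the same pins that were removed, the change is behaviour-preserving and equivalent to:

```commandline
# hypothetical one-shot equivalent of the removed inline pins
pip install 'ansible==2.3.0' netaddr 'apache-libcloud==0.20.1' 'boto==2.9.0'
```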
@@ -265,6 +259,7 @@ before_script:
 # Test matrix. Leave the comments for markup scripts.
 .coreos_calico_sep_variables: &coreos_calico_sep_variables
 # stage: deploy-gce-part1
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
   KUBE_NETWORK_PLUGIN: calico
   CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
   CLOUD_REGION: us-west1-b

@@ -275,9 +270,10 @@ before_script:
   ##User-data to simply turn off coreos upgrades
   STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'

-.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
+.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables
 # stage: deploy-gce-part1
   KUBE_NETWORK_PLUGIN: canal
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
   CLOUD_IMAGE: ubuntu-1604-xenial
   CLOUD_REGION: europe-west1-b
   CLUSTER_MODE: ha

@@ -370,6 +366,8 @@ before_script:

 .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
 # stage: deploy-gce-part1
+  AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
+  CLOUD_MACHINE_TYPE: "n1-standard-2"
   KUBE_NETWORK_PLUGIN: canal
   CERT_MGMT: vault
   CLOUD_IMAGE: ubuntu-1604-xenial

@@ -451,24 +449,24 @@ ubuntu-weave-sep-triggers:
   only: ['triggers']

 # More builds for PRs/merges (manual) and triggers (auto)
-ubuntu-canal-ha:
+ubuntu-canal-ha-rbac:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
+    <<: *ubuntu_canal_ha_rbac_variables
   when: manual
   except: ['triggers']
   only: ['master', /^pr-.*$/]

-ubuntu-canal-ha-triggers:
+ubuntu-canal-ha-rbac-triggers:
   stage: deploy-gce-part1
   <<: *job
   <<: *gce
   variables:
     <<: *gce_variables
-    <<: *ubuntu_canal_ha_variables
+    <<: *ubuntu_canal_ha_rbac_variables
   when: on_success
   only: ['triggers']

@@ -642,6 +640,13 @@ syntax-check:
     - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
   except: ['triggers', 'master']

+yamllint:
+  <<: *job
+  stage: unit-tests
+  script:
+    - yamllint roles
+  except: ['triggers', 'master']
+
 tox-inventory-builder:
   stage: unit-tests
   <<: *job

.yamllint (new file, 16 lines)

@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+  braces:
+    min-spaces-inside: 0
+    max-spaces-inside: 1
+  brackets:
+    min-spaces-inside: 0
+    max-spaces-inside: 1
+  indentation:
+    spaces: 2
+    indent-sequences: consistent
+  line-length: disable
+  new-line-at-end-of-file: disable
+  truthy: disable
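The new yamllint CI job above runs against this config. To reproduce the check locally (assuming yamllint is installed from PyPI), run it from the repository root so the .yamllint file is picked up automatically:

```commandline
pip install yamllint
yamllint roles
```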
@@ -53,13 +53,13 @@ Versions of supported components
 --------------------------------


-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
-[etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3 <br>
+[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
 [flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
+[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
 [canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
 [weave](http://weave.works/) v2.0.1 <br>
-[docker](https://www.docker.com/) v1.13.1 (see note)<br>
+[docker](https://www.docker.com/) v1.13 (see note)<br>
 [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>

 Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).

@@ -25,16 +25,29 @@ export AWS_DEFAULT_REGION="zzz"
 - Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`

 - Update `contrib/terraform/aws/terraform.tfvars` with your data
-- Allocate new AWS Elastic IPs: Depending on # of Availability Zones used (2 for each AZ)
+- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
 - Create an AWS EC2 SSH Key

 - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials

+Example:
+```commandline
+terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
+```
+
 - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`

+- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
+```commandline
+ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
+```
+
 - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.

+Example (this one assumes you are using CoreOS)
+```commandline
+ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
+```
+
 **Troubleshooting**

 ***Remaining AWS IAM Instance Profile***:
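The revised step asks for a single Elastic IP instead of two per availability zone. If you use the AWS CLI, one way to allocate it (assuming credentials and region are exported as described earlier in the doc) is:

```commandline
aws ec2 allocate-address --domain vpc
# note the PublicIp in the output and pass it as loadbalancer_apiserver_address
```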
@@ -162,7 +162,7 @@ resource "aws_instance" "k8s-worker" {
 */
 data "template_file" "inventory" {
     template = "${file("${path.module}/templates/inventory.tpl")}"

     vars {
         public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
         connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"

@@ -173,9 +173,9 @@ data "template_file" "inventory" {
         list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
         elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
         elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
-        kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}"
+        loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"

     }

 }

 resource "null_resource" "inventories" {

@@ -183,4 +183,8 @@ resource "null_resource" "inventories" {
     command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
   }

+  triggers {
+    template = "${data.template_file.inventory.rendered}"
+  }

 }
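The added triggers block ties the null_resource to the rendered template, so the local inventory file is rewritten whenever the rendered output changes, not only on first creation. A plain re-apply is then enough to refresh it; a sketch, assuming credentials are passed via the var file:

```commandline
terraform apply -var-file=credentials.tfvars   # re-renders and rewrites the inventory
cat ../../../inventory/hosts                   # same relative path the provisioner writes to
```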
@@ -25,4 +25,4 @@ kube-master
 [k8s-cluster:vars]
 ${elb_api_fqdn}
 ${elb_api_port}
-${kube_insecure_apiserver_address}
+${loadbalancer_apiserver_address}

@@ -5,11 +5,11 @@ aws_cluster_name = "devtest"
 aws_vpc_cidr_block = "10.250.192.0/18"
 aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
 aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
-aws_avail_zones = ["eu-central-1a","eu-central-1b"]
+aws_avail_zones = ["us-west-2a","us-west-2b"]

 #Bastion Host
-aws_bastion_ami = "ami-5900cc36"
-aws_bastion_size = "t2.small"
+aws_bastion_ami = "ami-db56b9a3"
+aws_bastion_size = "t2.medium"


 #Kubernetes Cluster

@@ -23,9 +23,10 @@ aws_etcd_size = "t2.medium"
 aws_kube_worker_num = 4
 aws_kube_worker_size = "t2.medium"

-aws_cluster_ami = "ami-903df7ff"
+aws_cluster_ami = "ami-db56b9a3"

 #Settings AWS ELB

-aws_elb_api_port = 443
-k8s_secure_api_port = 443
+aws_elb_api_port = 6443
+k8s_secure_api_port = 6443
+kube_insecure_apiserver_address = "0.0.0.0"

@@ -96,6 +96,6 @@ variable "k8s_secure_api_port" {
   description = "Secure Port of K8S API Server"
 }

-variable "kube_insecure_apiserver_address" {
-  description= "Bind Address for insecure Port of K8s API Server"
+variable "loadbalancer_apiserver_address" {
+  description= "Bind Address for ELB of K8s API Server"
 }

@@ -23,13 +23,6 @@ ip a show dev flannel.1
        valid_lft forever preferred_lft forever
 ```

-* Docker must be configured with a bridge ip in the flannel subnet.
-
-```
-ps aux | grep docker
-root     20196  1.7  2.7 1260616 56840 ?  Ssl  10:18  0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
-```
-
 * Try to run a container and check its ip address

 ```
@@ -57,7 +57,7 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
 See more details in the [ansible guide](ansible.md).

 Adding nodes
---------------------------
+------------

 You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.

@@ -66,4 +66,38 @@ You may want to add worker nodes to your existing cluster. This can be done by r
 ```
 ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
+
+Connecting to Kubernetes
+------------------------
+By default, Kubespray configures kube-master hosts with insecure access to
+kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
+because kubectl will use http://localhost:8080 to connect. The kubeconfig files
+generated will point to localhost (on kube-masters) and kube-node hosts will
+connect either to a localhost nginx proxy or to a loadbalancer if configured.
+More details on this process are in the [HA guide](ha.md).
+
+Kubespray permits connecting to the cluster remotely on any IP of any
+kube-master host on port 6443 by default. However, this requires
+authentication. One could generate a kubeconfig based on one of the installed
+kube-master hosts (needs improvement) or connect with a username and password.
+By default, a user with admin rights is created, named `kube`.
+The password can be viewed after deployment by looking at the file
+`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
+password. If you wish to set your own password, just precreate/modify this
+file yourself.
+
+For more information on kubeconfig and accessing a Kubernetes cluster, refer to
+the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
+
+Accessing Kubernetes Dashboard
+------------------------------
+
+If the variable `dashboard_enabled` is set (default is true), then you can
+access the Kubernetes Dashboard at the following URL:
+
+https://kube:_kube-password_@_host_:6443/ui/
+
+To see the password, refer to the section above, titled *Connecting to
+Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
+(when enabled).
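As a sketch of the credential flow described in the new section (the master address is a placeholder), the generated basic-auth password can be exercised directly against the secure port:

```commandline
KUBE_PASSWORD=$(cat credentials/kube_user)
# -k skips CA verification; supply the cluster CA instead for real use
curl -k -u "kube:${KUBE_PASSWORD}" https://MASTER_IP:6443/api/v1/nodes
```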
@@ -67,6 +67,8 @@ following default cluster paramters:
   OpenStack (default is unset)
 * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
   Kubernetes
+* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
+  alpha/experimental Kubernetes features. (default is `[]`)
 * *authorization_modes* - A list of [authorization mode](
   https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
   that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
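As an illustration of the new variable (the gate name here is only an example; any key=value pair supported by the deployed Kubernetes release works), feature gates can be passed as an extra var:

```commandline
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b \
  -e '{"kube_feature_gates": ["TaintBasedEvictions=true"]}'
```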
@@ -26,7 +26,6 @@ first task, is to stop any temporary instances of Vault, to free the port for
 the long-term. At the end of this task, the entire Vault cluster should be up
 and ready to go.
-

 Keys to the Kingdom
 -------------------

@@ -44,30 +43,38 @@ to authenticate to almost everything in Kubernetes and decode all private
 (HTTPS) traffic on your network signed by Vault certificates.

 For even greater security, you may want to remove and store elsewhere any
 CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).

 Vault by default encrypts all traffic to and from the datastore backend, all
 resting data, and uses TLS for its TCP listener. It is recommended that you
 do not change the Vault config to disable TLS, unless you absolutely have to.


 Usage
 -----

 To get the Vault role running, you must do two things at a minimum:

 1. Assign the ``vault`` group to at least 1 node in your inventory
-2. Change ``cert_management`` to be ``vault`` instead of ``script``
+1. Change ``cert_management`` to be ``vault`` instead of ``script``

 Nothing else is required, but customization is possible. Check
 ``roles/vault/defaults/main.yml`` for the different variables that can be
 overridden, most common being ``vault_config``, ``vault_port``, and
 ``vault_deployment_type``.

-Also, if you intend to use a Root or Intermediate CA generated elsewhere,
-you'll need to copy the certificate and key to the hosts in the vault group
-prior to running the vault role. By default, they'll be located at
-``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively.
+As a result, the Vault role will create separate Root CAs for `etcd`,
+`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA
+generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at:
+
+* vault:
+  * ``/etc/vault/ssl/ca.pem``
+  * ``/etc/vault/ssl/ca-key.pem``
+* etcd:
+  * ``/etc/ssl/etcd/ssl/ca.pem``
+  * ``/etc/ssl/etcd/ssl/ca-key.pem``
+* kubernetes:
+  * ``/etc/kubernetes/ssl/ca.pem``
+  * ``/etc/kubernetes/ssl/ca-key.pem``

 Additional Notes:

@@ -77,7 +84,6 @@ Additional Notes:
   credentials are saved to ``/etc/vault/roles/<role>/``. The service will
   need to read in those credentials, if they want to interact with Vault.
-

 Potential Work
 --------------

@@ -87,6 +93,3 @@ Potential Work
 - Add the ability to start temp Vault with Host, Rkt, or Docker
 - Add a dynamic way to change out the backend role creation during Bootstrap,
   so other services can be used (such as Consul)
-- Segregate Server Cert generation from Auth Cert generation (separate CAs).
-  This work was partially started with the `auth_cert_backend` tasks, but would
-  need to be further applied to all roles (particularly Etcd and Kubernetes).

@@ -40,23 +40,18 @@ kube_log_level: 2

 # Users to create for basic auth in Kubernetes API via HTTP
 # Optionally add groups for user
-kube_api_pwd: "changeme"
+kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
 kube_users:
   kube:
     pass: "{{kube_api_pwd}}"
     role: admin
-  root:
-    pass: "{{kube_api_pwd}}"
-    role: admin
-#    groups:
-#      - system:masters
-
-
+    groups:
+      - system:masters

 ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
 #kube_oidc_auth: false
-#kube_basic_auth: false
-#kube_token_auth: false
+#kube_basic_auth: true
+#kube_token_auth: true


 ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
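The password lookup replaces the hard-coded "changeme": on the first run Ansible generates a random 15-character password and stores it under credentials/ on the control machine (which this commit also adds to .gitignore), and later runs reuse the stored value. Roughly:

```commandline
# after the first deploy, on the Ansible control host:
cat credentials/kube_user
# to choose your own password, pre-create the file before deploying:
mkdir -p credentials && echo 'MyChosenPassword' > credentials/kube_user
```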
@@ -148,6 +143,9 @@ vault_deployment_type: docker
 # K8s image pull policy (imagePullPolicy)
 k8s_image_pull_policy: IfNotPresent

+# Kubernetes dashboard (available at http://first_master:6443/ui by default)
+dashboard_enabled: true
+
 # Monitoring apps for k8s
 efk_enabled: false

@@ -139,7 +139,7 @@ class KubeManager(object):
         if check and self.exists():
             return []

-        cmd = ['create']
+        cmd = ['apply']

         if not self.filename:
             self.module.fail_json(msg='filename required to create')

@@ -150,10 +150,7 @@ class KubeManager(object):

     def replace(self):

-        if not self.force and not self.exists():
-            return []
-
-        cmd = ['replace']
+        cmd = ['apply']

         if self.force:
             cmd.append('--force')

@@ -270,9 +267,8 @@ def main():

     manager = KubeManager(module)
     state = module.params.get('state')

     if state == 'present':
-        result = manager.create()
+        result = manager.create(check=False)

     elif state == 'absent':
         result = manager.delete()

@@ -284,11 +280,7 @@ def main():
         result = manager.stop()

     elif state == 'latest':
-        if manager.exists():
-            manager.force = True
-            result = manager.replace()
-        else:
-            result = manager.create(check=False)
+        result = manager.replace()

     else:
         module.fail_json(msg='Unrecognized state %s.' % state)
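The net effect of the kube.py changes is that both `state: present` and `state: latest` now shell out to the idempotent `kubectl apply` instead of the create/replace dance. In kubectl terms (the manifest path is illustrative):

```commandline
kubectl apply -f /etc/kubernetes/dnsmasq-deploy.yml
kubectl apply -f /etc/kubernetes/dnsmasq-deploy.yml   # second run updates in place, no AlreadyExists error
```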
@@ -16,6 +16,6 @@ Host {{ bastion_ip }}
   ControlPersist 5m

 Host {{ vars['hosts'] }}
-  ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
+  ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
   StrictHostKeyChecking no
 {% endif %}
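With ansible_ssh_private_key_file defined, the rendered ProxyCommand now carries the key through the bastion hop. A quick manual check of the generated config (file name per the docs/aws.md change above; user and target IP are placeholders):

```commandline
ssh -F ssh-bastion.conf core@10.250.x.x   # should reach the private node via the bastion
```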
@@ -49,4 +49,3 @@
   pip:
     name: "{{ item }}"
   with_items: "{{pip_python_modules}}"
-

@@ -21,10 +21,20 @@
 - name: Gather nodes hostnames
   setup:
     gather_subset: '!all'
-    filter: ansible_hostname
+    filter: ansible_*

-- name: Assign inventory name to unconfigured hostnames
+- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
   hostname:
     name: "{{inventory_hostname}}"
-  when: ansible_hostname == 'localhost'
+  when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
+
+- name: Assign inventory name to unconfigured hostnames (CoreOS only)
+  command: "hostnamectl set-hostname {{inventory_hostname}}"
+  register: hostname_changed
+  when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+
+- name: Update hostname fact (CoreOS only)
+  setup:
+    gather_subset: '!all'
+    filter: ansible_hostname
+  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed
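CoreOS hosts get their hostname set through hostnamectl, with a follow-up setup task to refresh the cached fact. Done by hand, the new CoreOS-only task amounts to:

```commandline
hostnamectl set-hostname node1   # node1 stands in for inventory_hostname
hostnamectl status               # verify before facts are re-gathered
```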
@@ -6,4 +6,3 @@
   regexp: '^\w+\s+requiretty'
   dest: /etc/sudoers
   state: absent
-

@@ -4,12 +4,12 @@

 # Max of 4 names is allowed and no more than 256 - 17 chars total
 # (a 2 is reserved for the 'default.svc.' and'svc.')
-#searchdomains:
+# searchdomains:
 #  - foo.bar.lc

 # Max of 2 is allowed here (a 1 is reserved for the dns_server)
-#nameservers:
+# nameservers:
 #  - 127.0.0.1

 dns_forward_max: 150
 cache_size: 1000

@@ -1,6 +1,4 @@
 ---
-- include: pre_upgrade.yml
-
 - name: ensure dnsmasq.d directory exists
   file:
     path: /etc/dnsmasq.d

@@ -56,6 +54,26 @@
     dest: /etc/dnsmasq.d/01-kube-dns.conf
     state: link

+- name: Create dnsmasq RBAC manifests
+  template:
+    src: "{{ item }}"
+    dest: "{{ kube_config_dir }}/{{ item }}"
+  with_items:
+    - "dnsmasq-clusterrolebinding.yml"
+    - "dnsmasq-serviceaccount.yml"
+  when: rbac_enabled
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  run_once: true
+
+- name: Apply dnsmasq RBAC manifests
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }}"
+  with_items:
+    - "dnsmasq-clusterrolebinding.yml"
+    - "dnsmasq-serviceaccount.yml"
+  when: rbac_enabled
+  delegate_to: "{{ groups['kube-master'][0] }}"
+  run_once: true
+
 - name: Create dnsmasq manifests
   template:
     src: "{{item.file}}"
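When rbac_enabled is set, the two new tasks template and apply a ServiceAccount and a ClusterRoleBinding (both added later in this commit) on the first master. Assuming the default kube-system namespace, the result can be verified with:

```commandline
kubectl get serviceaccount dnsmasq -n kube-system
kubectl get clusterrolebinding dnsmasq
```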
@@ -63,7 +81,7 @@
   with_items:
     - {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment}
     - {name: dnsmasq, file: dnsmasq-svc.yml, type: svc}
-    - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml, type: deployment}
+    - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml.j2, type: deployment}
   register: manifests
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -75,7 +93,7 @@
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    state: "latest"
   with_items: "{{ manifests.results }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -86,4 +104,3 @@
     port: 53
     timeout: 180
   when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
-

@@ -1,9 +0,0 @@
----
-- name: Delete legacy dnsmasq daemonset
-  kube:
-    name: dnsmasq
-    namespace: "{{system_namespace}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "ds"
-    state: absent
-  when: inventory_hostname == groups['kube-master'][0]

@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -30,21 +31,26 @@ spec:
         scheduler.alpha.kubernetes.io/critical-pod: ''
         scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
+{% if rbac_enabled %}
+      serviceAccountName: dnsmasq
+{% endif %}
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
       containers:
       - name: autoscaler
         image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
         resources:
           requests:
             cpu: "20m"
            memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
           - --namespace=kube-system
           - --configmap=dnsmasq-autoscaler
           - --target=Deployment/dnsmasq
           # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
           # If using small nodes, "nodesPerReplica" should dominate.
           - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
           - --logtostderr=true
           - --v={{ kube_log_level }}

roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: dnsmasq
+  namespace: "{{ system_namespace }}"
+subjects:
+  - kind: ServiceAccount
+    name: dnsmasq
+    namespace: "{{ system_namespace}}"
+roleRef:
+  kind: ClusterRole
+  name: cluster-admin
+  apiGroup: rbac.authorization.k8s.io

@@ -21,6 +21,9 @@ spec:
         kubernetes.io/cluster-service: "true"
         kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
     spec:
+      tolerations:
+      - effect: NoSchedule
+        operator: Exists
       containers:
       - name: dnsmasq
         image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"

@@ -35,7 +38,6 @@ spec:
           capabilities:
             add:
             - NET_ADMIN
-        imagePullPolicy: IfNotPresent
         resources:
           limits:
             cpu: {{ dns_cpu_limit }}

@@ -55,7 +57,6 @@ spec:
           mountPath: /etc/dnsmasq.d
         - name: etcdnsmasqdavailable
           mountPath: /etc/dnsmasq.d-available
-
       volumes:
       - name: etcdnsmasqd
         hostPath:

@@ -64,4 +65,3 @@ spec:
        hostPath:
          path: /etc/dnsmasq.d-available
       dnsPolicy: Default # Don't use cluster DNS.
-

roles/dnsmasq/templates/dnsmasq-serviceaccount.yml (new file, 8 lines)

@@ -0,0 +1,8 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: dnsmasq
+  namespace: "{{ system_namespace }}"
+  labels:
+    kubernetes.io/cluster-service: "true"

@@ -1,3 +1,4 @@
+---
 docker_version: '1.13'

 docker_package_info:

@@ -10,3 +11,6 @@ docker_repo_info:
   repos:

 docker_dns_servers_strict: yes
+
+docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
+docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'

@@ -8,7 +8,7 @@
     - Docker | pause while Docker restarts
     - Docker | wait for docker

-- name : Docker | reload systemd
+- name: Docker | reload systemd
   shell: systemctl daemon-reload

 - name: Docker | reload docker.socket

@@ -3,14 +3,14 @@
   include_vars: "{{ item }}"
   with_first_found:
     - files:
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
        - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
        - "{{ ansible_distribution|lower }}.yml"
        - "{{ ansible_os_family|lower }}.yml"
        - defaults.yml
      paths:
        - ../vars
      skip: true
   tags: facts

@@ -48,7 +48,7 @@
 - name: add system search domains to docker options
   set_fact:
     docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}"
   when: system_search_domains.stdout != ""

 - name: check number of nameservers
   fail:

@@ -10,11 +10,18 @@
     dest: /etc/systemd/system/docker.service.d/http-proxy.conf
   when: http_proxy is defined or https_proxy is defined or no_proxy is defined

+- name: get systemd version
+  command: rpm -q --qf '%{V}\n' systemd
+  register: systemd_version
+  when: ansible_os_family == "RedHat" and not is_atomic
+  changed_when: false
+
 - name: Write docker.service systemd file
   template:
     src: docker.service.j2
     dest: /etc/systemd/system/docker.service
   register: docker_service_file
+  notify: restart docker
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)

 - name: Write docker.service systemd file for atomic

@@ -1,3 +1,3 @@
 [Service]
 Environment="DOCKER_OPTS={{ docker_options | default('') }} \
---iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}"
+--iptables=false"

@@ -24,7 +24,9 @@ ExecStart={{ docker_bin_dir }}/docker daemon \
           $DOCKER_NETWORK_OPTIONS \
           $DOCKER_DNS_OPTIONS \
           $INSECURE_REGISTRY
+{% if ansible_os_family == "RedHat" and systemd_version.stdout|int >= 226 %}
 TasksMax=infinity
+{% endif %}
 LimitNOFILE=1048576
 LimitNPROC=1048576
 LimitCORE=infinity
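TasksMax is only understood by systemd 226 and later, so the unit template now guards it with the version fact registered in the task above. The guard mirrors this manual check on a RedHat-family host:

```commandline
rpm -q --qf '%{V}\n' systemd   # the same query the new Ansible task runs
# the rendered unit keeps TasksMax=infinity only if this prints 226 or higher
```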
@@ -1,7 +1,7 @@
 [dockerrepo]
 name=Docker Repository
-baseurl=https://yum.dockerproject.org/repo/main/centos/7
+baseurl={{ docker_rh_repo_base_url }}
 enabled=1
 gpgcheck=1
-gpgkey=https://yum.dockerproject.org/gpg
+gpgkey={{ docker_rh_repo_gpgkey }}
 {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}
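Because the repo template now reads the two defaults introduced earlier in this commit, an internal mirror can be substituted without patching the template; for example (mirror URLs are placeholders):

```commandline
ansible-playbook -i inventory/inventory.cfg cluster.yml -b \
  -e docker_rh_repo_base_url=https://mirror.example.com/docker/centos/7 \
  -e docker_rh_repo_gpgkey=https://mirror.example.com/docker/gpg
```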
@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '3.10'

 # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # versioning: docker-io itself is pinned at docker 1.5

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # https://yum.dockerproject.org/repo/main/centos/7/Packages/

@@ -8,7 +9,7 @@ docker_versioned_pkg:
   '1.12': docker-engine-1.12.6-1.el7.centos
   '1.13': docker-engine-1.13.1-1.el7.centos
   'stable': docker-engine-17.03.0.ce-1.el7.centos
   'edge': docker-engine-17.03.0.ce-1.el7.centos

 # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
 # https://download.docker.com/linux/centos/7/x86_64/stable/Packages/

@@ -20,20 +20,22 @@ download_always_pull: False

 # Versions
 kube_version: v1.7.3
 etcd_version: v3.2.4
-#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
+# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
 # after migration to container download
-calico_version: "v1.1.3"
-calico_cni_version: "v1.8.0"
-calico_policy_version: "v0.5.4"
+calico_version: "v2.5.0"
+calico_ctl_version: "v1.5.0"
+calico_cni_version: "v1.10.0"
+calico_policy_version: "v0.7.0"
 weave_version: 2.0.1
-flannel_version: v0.8.0
+flannel_version: "v0.8.0"
+flannel_cni_version: "v0.2.0"
 pod_infra_version: 3.0

 # Download URL's
 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"

 # Checksums
-etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
+etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"

 # Containers
 # Possible values: host, docker

@@ -42,13 +44,15 @@ etcd_image_repo: "quay.io/coreos/etcd"
 etcd_image_tag: "{{ etcd_version }}"
 flannel_image_repo: "quay.io/coreos/flannel"
 flannel_image_tag: "{{ flannel_version }}"
-calicoctl_image_repo: "calico/ctl"
-calicoctl_image_tag: "{{ calico_version }}"
-calico_node_image_repo: "calico/node"
+flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
+flannel_cni_image_tag: "{{ flannel_cni_version }}"
+calicoctl_image_repo: "quay.io/calico/ctl"
+calicoctl_image_tag: "{{ calico_ctl_version }}"
+calico_node_image_repo: "quay.io/calico/node"
 calico_node_image_tag: "{{ calico_version }}"
-calico_cni_image_repo: "calico/cni"
+calico_cni_image_repo: "quay.io/calico/cni"
 calico_cni_image_tag: "{{ calico_cni_version }}"
-calico_policy_image_repo: "calico/kube-policy-controller"
+calico_policy_image_repo: "quay.io/calico/kube-policy-controller"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "v0.3.0"

@@ -56,6 +60,8 @@ hyperkube_image_repo: "quay.io/coreos/hyperkube"
 hyperkube_image_tag: "{{ kube_version }}_coreos.0"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
+install_socat_image_repo: "xueshanf/install-socat"
+install_socat_image_tag: "latest"
 netcheck_version: "v1.0"
 netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"

@@ -137,6 +143,12 @@ downloads:
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
+  flannel_cni:
+    container: true
+    repo: "{{ flannel_cni_image_repo }}"
+    tag: "{{ flannel_cni_image_tag }}"
+    sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
   calicoctl:
     container: true
     repo: "{{ calicoctl_image_repo }}"

@@ -184,6 +196,11 @@ downloads:
     repo: "{{ pod_infra_image_repo }}"
     tag: "{{ pod_infra_image_tag }}"
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+  install_socat:
+    container: true
+    repo: "{{ install_socat_image_repo }}"
+    tag: "{{ install_socat_image_tag }}"
+    sha256: "{{ install_socat_digest_checksum|default(None) }}"
   nginx:
     container: true
     repo: "{{ nginx_image_repo }}"
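All calico images move from the Docker Hub calico/* namespace to quay.io/calico/*, and calicoctl is now pinned independently of the node image. The resulting pulls correspond to:

```commandline
docker pull quay.io/calico/ctl:v1.5.0
docker pull quay.io/calico/node:v2.5.0
docker pull quay.io/coreos/flannel-cni:v0.2.0
```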
@@ -1,12 +1,5 @@
 ---
-- name: downloading...
-  debug:
-    msg: "{{ download.url }}"
-  when:
-    - download.enabled|bool
-    - not download.container|bool
-
-- name: Create dest directories
+- name: file_download | Create dest directories
   file:
     path: "{{local_release_dir}}/{{download.dest|dirname}}"
     state: directory

@@ -16,7 +9,7 @@
     - not download.container|bool
   tags: bootstrap-os

-- name: Download items
+- name: file_download | Download item
   get_url:
     url: "{{download.url}}"
     dest: "{{local_release_dir}}/{{download.dest}}"

@@ -31,7 +24,7 @@
     - download.enabled|bool
     - not download.container|bool

-- name: Extract archives
+- name: file_download | Extract archives
   unarchive:
     src: "{{ local_release_dir }}/{{download.dest}}"
     dest: "{{ local_release_dir }}/{{download.dest|dirname}}"

@@ -41,10 +34,9 @@
   when:
     - download.enabled|bool
     - not download.container|bool
-    - download.unarchive is defined
-    - download.unarchive == True
+    - download.unarchive|default(False)

-- name: Fix permissions
+- name: file_download | Fix permissions
   file:
     state: file
     path: "{{local_release_dir}}/{{download.dest}}"

@@ -56,10 +48,11 @@
     - (download.unarchive is not defined or download.unarchive == False)

 - set_fact:
-    download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+    download_delegate: "{% if download_localhost|bool %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+  run_once: true
   tags: facts

-- name: Create dest directory for saved/loaded container images
+- name: container_download | Create dest directory for saved/loaded container images
   file:
     path: "{{local_release_dir}}/containers"
     state: directory

@@ -72,15 +65,14 @@
   tags: bootstrap-os

 # This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes
-- name: Hack python binary path for localhost
+- name: container_download | Hack python binary path for localhost
   raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python"
-  when: download_delegate == 'localhost'
   delegate_to: localhost
+  when: download_delegate == 'localhost'
   failed_when: false
-  run_once: true
   tags: localhost

-- name: Download | create local directory for saved/loaded container images
+- name: container_download | create local directory for saved/loaded container images
   file:
     path: "{{local_release_dir}}/containers"
     state: directory

@@ -95,24 +87,16 @@
     - download_delegate == 'localhost'
   tags: localhost

-- name: Make download decision if pull is required by tag or sha256
+- name: container_download | Make download decision if pull is required by tag or sha256
   include: set_docker_image_facts.yml
   when:
     - download.enabled|bool
     - download.container|bool
-  delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
+  delegate_to: "{{ download_delegate if download_run_once|bool or omit }}"
   run_once: "{{ download_run_once|bool }}"
   tags: facts

-- name: pulling...
-  debug:
-    msg: "{{ pull_args }}"
-  when:
-    - download.enabled|bool
-    - download.container|bool
-
-#NOTE(bogdando) this brings no docker-py deps for nodes
-- name: Download containers if pull is required or told to always pull
+- name: container_download | Download containers if pull is required or told to always pull
   command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
   register: pull_task_result
   until: pull_task_result|succeeded

@@ -122,29 +106,29 @@
     - download.enabled|bool
     - download.container|bool
     - pull_required|bool|default(download_always_pull)
-  delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
+  delegate_to: "{{ download_delegate if download_run_once|bool or omit }}"
   run_once: "{{ download_run_once|bool }}"

 - set_fact:
     fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
+  run_once: true
   tags: facts

-- name: "Set default value for 'container_changed' to false"
+- name: "container_download | Set default value for 'container_changed' to false"
   set_fact:
     container_changed: "{{pull_required|default(false)|bool}}"

-- name: "Update the 'container_changed' fact"
+- name: "container_download | Update the 'container_changed' fact"
   set_fact:
     container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}"
   when:
     - download.enabled|bool
     - download.container|bool
    - pull_required|bool|default(download_always_pull)
-  delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
   run_once: "{{ download_run_once|bool }}"
   tags: facts

-- name: Stat saved container image
+- name: container_download | Stat saved container image
   stat:
     path: "{{fname}}"
   register: img

@@ -158,7 +142,7 @@
   run_once: true
   tags: facts

-- name: Download | save container images
+- name: container_download | save container images
   shell: "{{ docker_bin_dir }}/docker save {{ pull_args }} | gzip -{{ download_compress }} > {{ fname }}"
   delegate_to: "{{ download_delegate }}"
   register: saved

@@ -170,7 +154,7 @@
   - download.container|bool
   - (container_changed|bool or not img.stat.exists)

-- name: Download | copy container images to ansible host
+- name: container_download | copy container images to ansible host
   synchronize:
     src: "{{ fname }}"
|
||||||
dest: "{{ fname }}"
|
dest: "{{ fname }}"
|
||||||
|
@ -186,7 +170,7 @@
|
||||||
- download.container|bool
|
- download.container|bool
|
||||||
- saved.changed
|
- saved.changed
|
||||||
|
|
||||||
- name: Download | upload container images to nodes
|
- name: container_download | upload container images to nodes
|
||||||
synchronize:
|
synchronize:
|
||||||
src: "{{ fname }}"
|
src: "{{ fname }}"
|
||||||
dest: "{{ fname }}"
|
dest: "{{ fname }}"
|
||||||
|
@ -206,7 +190,7 @@
|
||||||
- download.container|bool
|
- download.container|bool
|
||||||
tags: [upload, upgrade]
|
tags: [upload, upgrade]
|
||||||
|
|
||||||
- name: Download | load container images
|
- name: container_download | load container images
|
||||||
shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
|
shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
|
||||||
when:
|
when:
|
||||||
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
|
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and
|
||||||
|
|
|
@ -9,25 +9,20 @@

  - name: Register docker images info
  raw: >-
-     {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}"
+     {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
  no_log: true
- register: docker_images_raw
+ register: docker_images
  failed_when: false
  changed_when: false
  check_mode: no
  when: not download_always_pull|bool

- - set_fact:
-     docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}"
-   no_log: true
-   when: not download_always_pull|bool
-
  - set_fact:
  pull_required: >-
-     {%- if pull_args in docker_images.split(',') %}false{%- else -%}true{%- endif -%}
+     {%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
  when: not download_always_pull|bool

  - name: Check the local digest sha256 corresponds to the given image tag
  assert:
-     that: "{{download.repo}}:{{download.tag}} in docker_images.split(',')"
+     that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')"
  when: not download_always_pull|bool and not pull_required|bool and pull_by_digest|bool
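Note: with the intermediate set_fact gone, pull_required is now derived straight from the registered command output. A minimal standalone sketch of the same decision outside the role; the host pattern, the image value and the bare docker binary on PATH are illustrative assumptions, not taken from this repo:

    - hosts: localhost
      vars:
        image: "quay.io/coreos/etcd:v3.0.17"  # illustrative image reference
      tasks:
        - name: List local repo:tag pairs, comma-separated like the role does
          shell: >-
            docker images -q | xargs docker inspect -f
            "{{ '{{' }} (index .RepoTags 0) {{ '}}' }}" | tr '\n' ','
          register: docker_images
          changed_when: false
          failed_when: false  # tolerate hosts with no local images, as the role does

        - name: Decide whether a pull is needed
          set_fact:
            pull_required: "{{ image not in docker_images.stdout.split(',') }}"

        - name: Pull the image only when it is absent locally
          command: "docker pull {{ image }}"
          when: pull_required|bool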
@ -21,8 +21,10 @@ etcd_metrics: "basic"
  etcd_memory_limit: 512M

  # Uncomment to set CPU share for etcd
- #etcd_cpu_limit: 300m
+ # etcd_cpu_limit: 300m

  etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"

  etcd_compaction_retention: "8"

+ etcd_vault_mount_path: etcd
@ -5,6 +5,7 @@
  - Refresh Time Fact
  - Set Backup Directory
  - Create Backup Directory
+ - Stat etcd v2 data directory
  - Backup etcd v2 data
  - Backup etcd v3 data
  when: etcd_cluster_is_healthy.rc == 0

@ -24,7 +25,13 @@
  group: root
  mode: 0600

+ - name: Stat etcd v2 data directory
+   stat:
+     path: "{{ etcd_data_dir }}/member"
+   register: etcd_data_dir_member
+
  - name: Backup etcd v2 data
+   when: etcd_data_dir_member.stat.exists
  command: >-
  {{ bin_dir }}/etcdctl backup
  --data-dir {{ etcd_data_dir }}

@ -43,4 +50,3 @@
  ETCDCTL_API: 3
  retries: 3
  delay: "{{ retry_stagger | random + 3 }}"
-
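The new stat guard keeps `etcdctl backup` from failing on data directories created by etcd v3 only, where no `member/` subdirectory exists. A minimal sketch of the same guard as a standalone play; the data and backup paths are assumptions, not the role's variables:

    - hosts: etcd
      vars:
        etcd_data_dir: /var/lib/etcd        # assumed default location
        etcd_backup_dir: /var/backups/etcd  # illustrative target
      tasks:
        - name: Check for a v2 data directory
          stat:
            path: "{{ etcd_data_dir }}/member"
          register: member_dir

        - name: Back up v2 data only when it exists
          command: >-
            etcdctl backup
            --data-dir {{ etcd_data_dir }}
            --backup-dir {{ etcd_backup_dir }}
          when: member_dir.stat.exists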
@ -30,4 +30,3 @@
  - name: set etcd_secret_changed
  set_fact:
  etcd_secret_changed: true
-

@ -66,4 +66,3 @@
  {%- set _ = certs.update({'sync': True}) -%}
  {% endif %}
  {{ certs.sync }}
-
@ -73,11 +73,10 @@
  'member-{{ node }}-key.pem',
  {% endfor %}]"
  my_master_certs: ['ca-key.pem',
  'admin-{{ inventory_hostname }}.pem',
  'admin-{{ inventory_hostname }}-key.pem',
  'member-{{ inventory_hostname }}.pem',
- 'member-{{ inventory_hostname }}-key.pem'
- ]
+ 'member-{{ inventory_hostname }}-key.pem']
  all_node_certs: "['ca.pem',
  {% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
  'node-{{ node }}.pem',
@ -111,22 +110,22 @@
  sync_certs|default(false) and inventory_hostname not in groups['etcd']
  notify: set etcd_secret_changed

- #NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
- #char limit when using shell command
+ # NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
+ # char limit when using shell command

- #FIXME(mattymo): Use tempfile module in ansible 2.3
+ # FIXME(mattymo): Use tempfile module in ansible 2.3
  - name: Gen_certs | Prepare tempfile for unpacking certs
  shell: mktemp /tmp/certsXXXXX.tar.gz
  register: cert_tempfile
  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
  inventory_hostname != groups['etcd'][0]

  - name: Gen_certs | Write master certs to tempfile
  copy:
  content: "{{etcd_master_cert_data.stdout}}"
  dest: "{{cert_tempfile.stdout}}"
  owner: root
  mode: "0600"
  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
  inventory_hostname != groups['etcd'][0]

@ -162,30 +161,3 @@
  owner: kube
  mode: "u=rwX,g-rwx,o-rwx"
  recurse: yes

- - name: Gen_certs | target ca-certificate store file
-   set_fact:
-     ca_cert_path: |-
-       {% if ansible_os_family == "Debian" -%}
-       /usr/local/share/ca-certificates/etcd-ca.crt
-       {%- elif ansible_os_family == "RedHat" -%}
-       /etc/pki/ca-trust/source/anchors/etcd-ca.crt
-       {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
-       /etc/ssl/certs/etcd-ca.pem
-       {%- endif %}
-   tags: facts
-
- - name: Gen_certs | add CA to trusted CA dir
-   copy:
-     src: "{{ etcd_cert_dir }}/ca.pem"
-     dest: "{{ ca_cert_path }}"
-     remote_src: true
-   register: etcd_ca_cert
-
- - name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
-   command: update-ca-certificates
-   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
-
- - name: Gen_certs | update ca-certificates (RedHat)
-   command: update-ca-trust extract
-   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
@ -7,52 +7,14 @@
  when: inventory_hostname in etcd_node_cert_hosts
  tags: etcd-secrets

- - name: gen_certs_vault | Read in the local credentials
-   command: cat /etc/vault/roles/etcd/userpass
-   register: etcd_vault_creds_cat
-   delegate_to: "{{ groups['vault'][0] }}"
-
- - name: gen_certs_vault | Set facts for read Vault Creds
-   set_fact:
-     etcd_vault_creds: "{{ etcd_vault_creds_cat.stdout|from_json }}"
-   delegate_to: "{{ groups['vault'][0] }}"
-
- - name: gen_certs_vault | Log into Vault and obtain an token
-   uri:
-     url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ etcd_vault_creds.username }}"
-     headers:
-       Accept: application/json
-       Content-Type: application/json
-     method: POST
-     body_format: json
-     body:
-       password: "{{ etcd_vault_creds.password }}"
-   register: etcd_vault_login_result
-   delegate_to: "{{ groups['vault'][0] }}"
-
- - name: gen_certs_vault | Set fact for vault_client_token
-   set_fact:
-     vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}"
-   run_once: true
-
- - name: gen_certs_vault | Set fact for Vault API token
-   set_fact:
-     etcd_vault_headers:
-       Accept: application/json
-       Content-Type: application/json
-       X-Vault-Token: "{{ vault_client_token }}"
-   run_once: true
-   when: vault_client_token != ""
-
  # Issue master certs to Etcd nodes
  - include: ../../vault/tasks/shared/issue_cert.yml
  vars:
+ issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
  issue_cert_alt_names: "{{ groups.etcd + ['localhost'] }}"
  issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
  issue_cert_file_group: "{{ etcd_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_headers: "{{ etcd_vault_headers }}"
  issue_cert_hosts: "{{ groups.etcd }}"
  issue_cert_ip_sans: >-
  [

@ -67,6 +29,7 @@
  issue_cert_path: "{{ item }}"
  issue_cert_role: etcd
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
+ issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_master_certs_needed|d([]) }}"
  when: inventory_hostname in groups.etcd
  notify: set etcd_secret_changed

@ -74,11 +37,11 @@
  # Issue node certs to everyone else
  - include: ../../vault/tasks/shared/issue_cert.yml
  vars:
+ issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
  issue_cert_alt_names: "{{ etcd_node_cert_hosts }}"
  issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}"
  issue_cert_file_group: "{{ etcd_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_headers: "{{ etcd_vault_headers }}"
  issue_cert_hosts: "{{ etcd_node_cert_hosts }}"
  issue_cert_ip_sans: >-
  [

@ -93,8 +56,7 @@
  issue_cert_path: "{{ item }}"
  issue_cert_role: etcd
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
+ issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_node_certs_needed|d([]) }}"
  when: inventory_hostname in etcd_node_cert_hosts
  notify: set etcd_secret_changed
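The `issue_cert_headers` plumbing disappears here because the shared issue_cert.yml now handles Vault authentication itself, while the new `issue_cert_mount_path` lets etcd certificates come from a dedicated PKI mount. A group_vars sketch for pointing the role at a non-default mount; the custom value is illustrative:

    # inventory/group_vars/all.yml (sketch)
    cert_management: vault
    etcd_vault_mount_path: etcd-pki   # assumed custom mount; the role defaults to "etcd"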
@ -1,5 +1,5 @@
  ---
- #Plan A: no docker-py deps
+ # Plan A: no docker-py deps
  - name: Install | Copy etcdctl binary from docker container
  command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy;
  {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&

@ -11,22 +11,3 @@
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false
-
- #Plan B: looks nicer, but requires docker-py on all hosts:
- #- name: Install | Set up etcd-binarycopy container
- #  docker:
- #    name: etcd-binarycopy
- #    state: present
- #    image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
- #  when: etcd_deployment_type == "docker"
- #
- #- name: Install | Copy etcdctl from etcd-binarycopy container
- #  command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl"
- #  when: etcd_deployment_type == "docker"
- #
- #- name: Install | Clean up etcd-binarycopy container
- #  docker:
- #    name: etcd-binarycopy
- #    state: absent
- #    image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
- #  when: etcd_deployment_type == "docker"
@ -1,8 +1,4 @@
  ---
- - include: pre_upgrade.yml
-   when: etcd_cluster_setup
-   tags: etcd-pre-upgrade
-
  - include: check_certs.yml
  when: cert_management == "script"
  tags: [etcd-secrets, facts]

@ -10,6 +6,14 @@
  - include: "gen_certs_{{ cert_management }}.yml"
  tags: etcd-secrets

+ - include: upd_ca_trust.yml
+   tags: etcd-secrets
+
+ - name: "Gen_certs | Get etcd certificate serials"
+   shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2"
+   register: "node-{{ inventory_hostname }}_serial"
+   when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
+
  - include: "install_{{ etcd_deployment_type }}.yml"
  when: is_etcd_master
  tags: upgrade
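A note on the new serial task: Ansible takes `register:` names literally, so the Jinja expression in `register: "node-{{ inventory_hostname }}_serial"` may not expand the way the hunk suggests. A standalone sketch of the same serial extraction with a plain register name; the cert path is assumed to match the role's layout:

    - hosts: etcd
      vars:
        etcd_cert_dir: /etc/ssl/etcd/ssl   # assumed; matches the role default above
      tasks:
        - name: Read the node certificate serial
          shell: >-
            openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
            -noout -serial | cut -d= -f2
          register: etcd_node_cert_serial
          changed_when: false

        - debug:
            var: etcd_node_cert_serial.stdout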
@ -1,59 +0,0 @@
- - name: "Pre-upgrade | check for etcd-proxy unit file"
-   stat:
-     path: /etc/systemd/system/etcd-proxy.service
-   register: etcd_proxy_service_file
-   tags: facts
-
- - name: "Pre-upgrade | check for etcd-proxy init script"
-   stat:
-     path: /etc/init.d/etcd-proxy
-   register: etcd_proxy_init_script
-   tags: facts
-
- - name: "Pre-upgrade | stop etcd-proxy if service defined"
-   service:
-     name: etcd-proxy
-     state: stopped
-   when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False))
-
- - name: "Pre-upgrade | remove etcd-proxy service definition"
-   file:
-     path: "{{ item }}"
-     state: absent
-   when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False))
-   with_items:
-     - /etc/systemd/system/etcd-proxy.service
-     - /etc/init.d/etcd-proxy
-
- - name: "Pre-upgrade | find etcd-proxy container"
-   command: "{{ docker_bin_dir }}/docker ps -aq --filter 'name=etcd-proxy*'"
-   register: etcd_proxy_container
-   changed_when: false
-   failed_when: false
-
- - name: "Pre-upgrade | remove etcd-proxy if it exists"
-   command: "{{ docker_bin_dir }}/docker rm -f {{item}}"
-   with_items: "{{etcd_proxy_container.stdout_lines}}"
-
- - name: "Pre-upgrade | see if etcdctl is installed"
-   stat:
-     path: "{{ bin_dir }}/etcdctl"
-   register: etcdctl_installed
-
- - name: "Pre-upgrade | check if member list is non-SSL"
-   command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list"
-   register: etcd_member_list
-   retries: 10
-   delay: 3
-   until: etcd_member_list.rc != 2
-   run_once: true
-   when: etcdctl_installed.stat.exists
-   changed_when: false
-   failed_when: false
-
- - name: "Pre-upgrade | change peer names to SSL"
-   shell: >-
-     {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list |
-     awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash
-   run_once: true
-   when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'
@ -1,7 +1,7 @@
  ---
  - name: Refresh config | Create etcd config file
  template:
-     src: etcd.env.yml
+     src: etcd.env.j2
  dest: /etc/etcd.env
  notify: restart etcd
  when: is_etcd_master
@ -1,7 +1,7 @@
  ---

  - name: sync_etcd_master_certs | Create list of master certs needing creation
  set_fact:
  etcd_master_cert_list: >-
  {{ etcd_master_cert_list|default([]) + [
  "admin-" + item + ".pem",

@ -11,7 +11,7 @@
  run_once: true

  - include: ../../vault/tasks/shared/sync_file.yml
  vars:
  sync_file: "{{ item }}"
  sync_file_dir: "{{ etcd_cert_dir }}"
  sync_file_hosts: "{{ groups.etcd }}"
@ -1,12 +1,12 @@
  ---

  - name: sync_etcd_node_certs | Create list of node certs needing creation
  set_fact:
  etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + item + '.pem'] }}"
  with_items: "{{ etcd_node_cert_hosts }}"

  - include: ../../vault/tasks/shared/sync_file.yml
  vars:
  sync_file: "{{ item }}"
  sync_file_dir: "{{ etcd_cert_dir }}"
  sync_file_hosts: "{{ etcd_node_cert_hosts }}"

@ -24,7 +24,7 @@
  sync_file_results: []

  - include: ../../vault/tasks/shared/sync_file.yml
  vars:
  sync_file: ca.pem
  sync_file_dir: "{{ etcd_cert_dir }}"
  sync_file_hosts: "{{ etcd_node_cert_hosts }}"
roles/etcd/tasks/upd_ca_trust.yml (new file, +27)

@ -0,0 +1,27 @@
+ ---
+ - name: Gen_certs | target ca-certificate store file
+   set_fact:
+     ca_cert_path: |-
+       {% if ansible_os_family == "Debian" -%}
+       /usr/local/share/ca-certificates/etcd-ca.crt
+       {%- elif ansible_os_family == "RedHat" -%}
+       /etc/pki/ca-trust/source/anchors/etcd-ca.crt
+       {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
+       /etc/ssl/certs/etcd-ca.pem
+       {%- endif %}
+   tags: facts
+
+ - name: Gen_certs | add CA to trusted CA dir
+   copy:
+     src: "{{ etcd_cert_dir }}/ca.pem"
+     dest: "{{ ca_cert_path }}"
+     remote_src: true
+   register: etcd_ca_cert
+
+ - name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
+   command: update-ca-certificates
+   when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
+
+ - name: Gen_certs | update ca-certificates (RedHat)
+   command: update-ca-trust extract
+   when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
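These tasks are the CA-trust block moved verbatim out of gen_certs_script.yml so both cert backends can include it. For a single Debian-family host the same flow reduces to roughly this sketch; the paths are assumed to match the role's defaults:

    - hosts: all
      tasks:
        - name: Install the etcd CA into the distro trust dir
          copy:
            src: /etc/ssl/etcd/ssl/ca.pem                       # assumed etcd_cert_dir
            dest: /usr/local/share/ca-certificates/etcd-ca.crt  # Debian-family store
            remote_src: true
          register: etcd_ca_cert

        - name: Rebuild the system CA bundle
          command: update-ca-certificates
          when: etcd_ca_cert.changed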
@ -1,9 +1,8 @@
  ---

  elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org'
- elrepo_rpm : elrepo-release-7.0-3.el7.elrepo.noarch.rpm
+ elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm
- elrepo_mirror : http://www.elrepo.org
+ elrepo_mirror: http://www.elrepo.org

- elrepo_url : '{{elrepo_mirror}}/{{elrepo_rpm}}'
+ elrepo_url: '{{elrepo_mirror}}/{{elrepo_rpm}}'

  elrepo_kernel_package: "kernel-lt"
@ -1,5 +1,6 @@
+ ---
  # Versions
- kubedns_version : 1.14.2
+ kubedns_version: 1.14.2
  kubednsautoscaler_version: 1.1.1

  # Limits for dnsmasq/kubedns apps

@ -37,6 +38,17 @@ netchecker_server_memory_limit: 256M
  netchecker_server_cpu_requests: 50m
  netchecker_server_memory_requests: 64M

+ # Dashboard
+ dashboard_enabled: false
+ dashboard_image_repo: kubernetesdashboarddev/kubernetes-dashboard-amd64
+ dashboard_image_tag: head
+
+ # Limits for dashboard
+ dashboard_cpu_limit: 100m
+ dashboard_memory_limit: 256M
+ dashboard_cpu_requests: 50m
+ dashboard_memory_requests: 64M
+
  # SSL
  etcd_cert_dir: "/etc/ssl/etcd/ssl"
  canal_cert_dir: "/etc/canal/certs"
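The dashboard ships disabled. Enabling it is a one-line inventory override; a group_vars sketch, where every value besides `dashboard_enabled` just repeats the defaults above:

    # inventory/group_vars/k8s-cluster.yml (sketch)
    dashboard_enabled: true
    dashboard_cpu_limit: 100m
    dashboard_memory_limit: 256M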
roles/kubernetes-apps/ansible/tasks/dashboard.yml (new file, +20)

@ -0,0 +1,20 @@
+ ---
+ - name: Kubernetes Apps | Lay down dashboard template
+   template:
+     src: "{{item.file}}"
+     dest: "{{kube_config_dir}}/{{item.file}}"
+   with_items:
+     - {file: dashboard.yml.j2, type: deploy, name: netchecker-agent}
+   register: manifests
+   when: inventory_hostname == groups['kube-master'][0]
+
+ - name: Kubernetes Apps | Start dashboard
+   kube:
+     name: "{{item.item.name}}"
+     namespace: "{{system_namespace}}"
+     kubectl: "{{bin_dir}}/kubectl"
+     resource: "{{item.item.type}}"
+     filename: "{{kube_config_dir}}/{{item.item.file}}"
+     state: "latest"
+   with_items: "{{ manifests.results }}"
+   when: inventory_hostname == groups['kube-master'][0]
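Once applied, the dashboard Service (port 80 to container port 9090, defined in the template below) is reachable through the apiserver proxy. A smoke-test sketch; the insecure port, namespace and proxy URL shape are assumptions based on the defaults in this change, not verbatim from the repo:

    - hosts: kube-master[0]
      tasks:
        - name: Check the dashboard answers via the apiserver proxy
          uri:
            url: "http://localhost:8080/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/"
            status_code: 200
          register: dashboard_probe
          until: dashboard_probe.status == 200
          retries: 5
          delay: 10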
@ -1,7 +1,7 @@
  ---
  - name: Kubernetes Apps | Wait for kube-apiserver
  uri:
-     url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
+     url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
  register: result
  until: result.status == 200
  retries: 10
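`kube_apiserver_insecure_endpoint` is a new composite variable; it is presumably defined elsewhere in this commit along the lines of the sketch below. The exact definition is an assumption, inferred only from the URL it replaces:

    # group_vars sketch, not verbatim from this diff
    kube_apiserver_insecure_endpoint: "http://localhost:{{ kube_apiserver_insecure_port }}"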
@ -14,12 +14,12 @@
  dest: "{{kube_config_dir}}/{{item.file}}"
  with_items:
  - {name: kubedns, file: kubedns-sa.yml, type: sa}
-   - {name: kubedns, file: kubedns-deploy.yml, type: deployment}
+   - {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment}
  - {name: kubedns, file: kubedns-svc.yml, type: svc}
  - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
  - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
  - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
-   - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
+   - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment}
  register: manifests
  when:
  - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]

@ -51,13 +51,20 @@
  kubectl: "{{bin_dir}}/kubectl"
  resource: "{{item.item.type}}"
  filename: "{{kube_config_dir}}/{{item.item.file}}"
-   state: "{{item.changed | ternary('latest','present') }}"
+   state: "latest"
  with_items: "{{ manifests.results }}"
- failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
- when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
+ when:
+   - dns_mode != 'none'
+   - inventory_hostname == groups['kube-master'][0]
+   - not item|skipped
  tags: dnsmasq

  - name: Kubernetes Apps | Netchecker
  include: tasks/netchecker.yml
  when: deploy_netchecker
  tags: netchecker

+ - name: Kubernetes Apps | Dashboard
+   include: tasks/dashboard.yml
+   when: dashboard_enabled
+   tags: dashboard
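This is the recurring change of the commit: manifests registered from a templating loop are now always applied with `state: latest`, and `not item|skipped` filters out loop entries whose template step was skipped, replacing the old ternary on `item.changed` plus the `failed_when` escape hatch for AlreadyExists errors. A minimal sketch of the pattern with stock modules; the `kube` module above is this repo's own, so plain `command` stands in for it here, and the file list is illustrative:

    - hosts: kube-master[0]
      tasks:
        - name: Template manifests, possibly skipping some entries
          template:
            src: "{{ item.file }}.j2"
            dest: "/etc/kubernetes/{{ item.file }}"
          with_items:
            - {file: app-deploy.yml}
          register: manifests

        - name: Apply every templated manifest, skipping skipped loop entries
          command: "kubectl apply -f /etc/kubernetes/{{ item.item.file }}"
          with_items: "{{ manifests.results }}"
          when: not item|skipped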
@ -1,3 +1,21 @@
+ ---
+
+ - name: Kubernetes Apps | Check if netchecker-server manifest already exists
+   stat:
+     path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2"
+   register: netchecker_server_manifest
+   tags: ['facts', 'upgrade']
+
+ - name: Kubernetes Apps | Apply netchecker-server manifest to update annotations
+   kube:
+     name: "netchecker-server"
+     namespace: "{{ netcheck_namespace }}"
+     kubectl: "{{bin_dir}}/kubectl"
+     resource: "deploy"
+     state: latest
+   when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists
+   tags: upgrade
+
  - name: Kubernetes Apps | Lay Down Netchecker Template
  template:
  src: "{{item.file}}"

@ -24,18 +42,6 @@
  state: absent
  when: inventory_hostname == groups['kube-master'][0]

- #FIXME: remove if kubernetes/features#124 is implemented
- - name: Kubernetes Apps | Purge old Netchecker daemonsets
-   kube:
-     name: "{{item.item.name}}"
-     namespace: "{{netcheck_namespace}}"
-     kubectl: "{{bin_dir}}/kubectl"
-     resource: "{{item.item.type}}"
-     filename: "{{kube_config_dir}}/{{item.item.file}}"
-     state: absent
-   with_items: "{{ manifests.results }}"
-   when: inventory_hostname == groups['kube-master'][0] and item.item.type == "ds" and item.changed
-
  - name: Kubernetes Apps | Start Netchecker Resources
  kube:
  name: "{{item.item.name}}"

@ -43,7 +49,6 @@
  kubectl: "{{bin_dir}}/kubectl"
  resource: "{{item.item.type}}"
  filename: "{{kube_config_dir}}/{{item.item.file}}"
-   state: "{{item.changed | ternary('latest','present') }}"
+   state: "latest"
  with_items: "{{ manifests.results }}"
- failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
- when: inventory_hostname == groups['kube-master'][0]
+ when: inventory_hostname == groups['kube-master'][0] and not item|skipped
roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 (new file, +110)

@ -0,0 +1,110 @@
+ # Copyright 2015 Google Inc. All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Configuration to deploy head version of the Dashboard UI compatible with
+ # Kubernetes 1.6 (RBAC enabled).
+ #
+ # Example usage: kubectl create -f <this_file>
+
+ {% if rbac_enabled %}
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+   labels:
+     k8s-app: kubernetes-dashboard
+   name: kubernetes-dashboard
+   namespace: {{ system_namespace }}
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: ClusterRoleBinding
+ metadata:
+   name: kubernetes-dashboard
+   labels:
+     k8s-app: kubernetes-dashboard
+ roleRef:
+   apiGroup: rbac.authorization.k8s.io
+   kind: ClusterRole
+   name: cluster-admin
+ subjects:
+ - kind: ServiceAccount
+   name: kubernetes-dashboard
+   namespace: {{ system_namespace }}
+ {% endif %}
+ ---
+ kind: Deployment
+ apiVersion: extensions/v1beta1
+ metadata:
+   labels:
+     k8s-app: kubernetes-dashboard
+   name: kubernetes-dashboard
+   namespace: {{ system_namespace }}
+ spec:
+   replicas: 1
+   revisionHistoryLimit: 10
+   selector:
+     matchLabels:
+       k8s-app: kubernetes-dashboard
+   template:
+     metadata:
+       labels:
+         k8s-app: kubernetes-dashboard
+     spec:
+       containers:
+       - name: kubernetes-dashboard
+         image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
+         # Image is tagged and updated with :head, so always pull it.
+         imagePullPolicy: Always
+         resources:
+           limits:
+             cpu: {{ dashboard_cpu_limit }}
+             memory: {{ dashboard_memory_limit }}
+           requests:
+             cpu: {{ dashboard_cpu_requests }}
+             memory: {{ dashboard_memory_requests }}
+         ports:
+         - containerPort: 9090
+           protocol: TCP
+         args:
+           # Uncomment the following line to manually specify Kubernetes API server Host
+           # If not specified, Dashboard will attempt to auto discover the API server and connect
+           # to it. Uncomment only if the default does not work.
+           # - --apiserver-host=http://my-address:port
+         livenessProbe:
+           httpGet:
+             path: /
+             port: 9090
+           initialDelaySeconds: 30
+           timeoutSeconds: 30
+       {% if rbac_enabled %}
+       serviceAccountName: kubernetes-dashboard
+       {% endif %}
+       # Comment the following tolerations if Dashboard must not be deployed on master
+       tolerations:
+       - key: node-role.kubernetes.io/master
+         effect: NoSchedule
+ ---
+ kind: Service
+ apiVersion: v1
+ metadata:
+   labels:
+     k8s-app: kubernetes-dashboard
+   name: kubernetes-dashboard
+   namespace: {{ system_namespace }}
+ spec:
+   ports:
+   - port: 80
+     targetPort: 9090
+   selector:
+     k8s-app: kubernetes-dashboard
@ -1,3 +1,4 @@
+ ---
  # Copyright 2016 The Kubernetes Authors. All rights reserved
  #
  # Licensed under the Apache License, Version 2.0 (the "License");

@ -1,3 +1,4 @@
+ ---
  # Copyright 2016 The Kubernetes Authors. All rights reserved
  #
  # Licensed under the Apache License, Version 2.0 (the "License");

@ -1,3 +1,4 @@
+ ---
  # Copyright 2016 The Kubernetes Authors. All rights reserved
  #
  # Licensed under the Apache License, Version 2.0 (the "License");

@ -1,3 +1,4 @@
+ ---
  # Copyright 2016 The Kubernetes Authors.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");

@ -26,26 +27,26 @@ spec:
  metadata:
  labels:
  k8s-app: kubedns-autoscaler
- annotations:
-   scheduler.alpha.kubernetes.io/critical-pod: ''
-   scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
  spec:
+   tolerations:
+   - effect: NoSchedule
+     operator: Exists
  containers:
  - name: autoscaler
  image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
  resources:
  requests:
  cpu: "20m"
  memory: "10Mi"
  command:
  - /cluster-proportional-autoscaler
  - --namespace={{ system_namespace }}
  - --configmap=kubedns-autoscaler
  # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
  - --target=Deployment/kube-dns
  - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
  - --logtostderr=true
  - --v=2
  {% if rbac_enabled %}
  serviceAccountName: cluster-proportional-autoscaler
  {% endif %}
@ -1,3 +1,4 @@
+ ---
  apiVersion: extensions/v1beta1
  kind: Deployment
  metadata:

@ -29,6 +30,8 @@ spec:
  tolerations:
  - key: "CriticalAddonsOnly"
  operator: "Exists"
+ - effect: NoSchedule
+   operator: Exists
  volumes:
  - name: kube-dns-config
  configMap:

@ -1,3 +1,4 @@
+ ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:

@ -1,3 +1,4 @@
+ ---
  apiVersion: v1
  kind: Service
  metadata:

@ -19,4 +20,3 @@ spec:
  - name: dns-tcp
  port: 53
  protocol: TCP
-
@ -12,6 +12,9 @@ spec:
  labels:
  app: netchecker-agent
  spec:
+   tolerations:
+   - effect: NoSchedule
+     operator: Exists
  containers:
  - name: netchecker-agent
  image: "{{ agent_img }}"

@ -37,3 +40,8 @@ spec:
  requests:
  cpu: {{ netchecker_agent_cpu_requests }}
  memory: {{ netchecker_agent_memory_requests }}
+ updateStrategy:
+   rollingUpdate:
+     maxUnavailable: 1
+   type: RollingUpdate

@ -16,6 +16,9 @@ spec:
  {% if kube_version | version_compare('v1.6', '>=') %}
  dnsPolicy: ClusterFirstWithHostNet
  {% endif %}
+   tolerations:
+   - effect: NoSchedule
+     operator: Exists
  containers:
  - name: netchecker-agent
  image: "{{ agent_img }}"

@ -41,3 +44,7 @@ spec:
  requests:
  cpu: {{ netchecker_agent_cpu_requests }}
  memory: {{ netchecker_agent_memory_requests }}
+ updateStrategy:
+   rollingUpdate:
+     maxUnavailable: 1
+   type: RollingUpdate

@ -25,12 +25,14 @@ spec:
  memory: {{ netchecker_server_memory_requests }}
  ports:
  - containerPort: 8081
-   hostPort: 8081
  args:
  - "-v=5"
  - "-logtostderr"
  - "-kubeproxyinit"
  - "-endpoint=0.0.0.0:8081"
+   tolerations:
+   - effect: NoSchedule
+     operator: Exists
  {% if rbac_enabled %}
  serviceAccountName: netchecker-server
  {% endif %}
@ -1,5 +1,5 @@
  ---
  elasticsearch_cpu_limit: 1000m
  elasticsearch_mem_limit: 0M
  elasticsearch_cpu_requests: 100m
  elasticsearch_mem_requests: 0M
@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: download
  file: "{{ downloads.elasticsearch }}"
@ -10,7 +10,7 @@
  when: rbac_enabled

  - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-   command: "kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+   command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
  with_items:
  - "efk-sa.yml"
  - "efk-clusterrolebinding.yml"

@ -38,4 +38,3 @@
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
  run_once: true
  when: es_service_manifest.changed
-
@ -1,3 +1,4 @@
+ ---
  kind: ClusterRoleBinding
  apiVersion: rbac.authorization.k8s.io/v1beta1
  metadata:

@ -1,3 +1,4 @@
+ ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:
@ -1,5 +1,5 @@
  ---
  fluentd_cpu_limit: 0m
  fluentd_mem_limit: 200Mi
  fluentd_cpu_requests: 100m
  fluentd_mem_requests: 200Mi
@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: download
  file: "{{ downloads.fluentd }}"
@ -20,4 +20,3 @@
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
  run_once: true
  when: fluentd_ds_manifest.changed
-
@ -17,6 +17,9 @@ spec:
  kubernetes.io/cluster-service: "true"
  version: "v{{ fluentd_version }}"
  spec:
+   tolerations:
+   - effect: NoSchedule
+     operator: Exists
  containers:
  - name: fluentd-es
  image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"

@ -55,4 +58,3 @@ spec:
  {% if rbac_enabled %}
  serviceAccountName: efk
  {% endif %}
-
@ -1,5 +1,5 @@
  ---
  kibana_cpu_limit: 100m
  kibana_mem_limit: 0M
  kibana_cpu_requests: 100m
  kibana_mem_requests: 0M
@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: download
  file: "{{ downloads.kibana }}"
@ -1,6 +1,6 @@
  ---
  - name: "Kibana | Write Kibana deployment"
  template:
  src: kibana-deployment.yml.j2
  dest: "{{ kube_config_dir }}/kibana-deployment.yaml"
  register: kibana_deployment_manifest

@ -12,12 +12,12 @@
  name: "kibana-logging"
  namespace: "{{system_namespace}}"
  resource: "deployment"
-   state: "{{ item | ternary('latest','present') }}"
+   state: "latest"
  with_items: "{{ kibana_deployment_manifest.changed }}"
  run_once: true

  - name: "Kibana | Write Kibana service "
  template:
  src: kibana-service.yml.j2
  dest: "{{ kube_config_dir }}/kibana-service.yaml"
  register: kibana_service_manifest

@ -29,6 +29,6 @@
  name: "kibana-logging"
  namespace: "{{system_namespace}}"
  resource: "svc"
-   state: "{{ item | ternary('latest','present') }}"
+   state: "latest"
  with_items: "{{ kibana_service_manifest.changed }}"
  run_once: true
@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: kubernetes-apps/efk/elasticsearch
  - role: kubernetes-apps/efk/fluentd
@ -1,3 +1,4 @@
+ ---
  helm_enabled: false

  # specify a dir and attach it to helm for HELM_HOME.
@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: download
  file: "{{ downloads.helm }}"
@ -27,9 +27,8 @@
  kubectl: "{{bin_dir}}/kubectl"
  resource: "{{item.item.type}}"
  filename: "{{kube_config_dir}}/{{item.item.file}}"
-   state: "{{item.changed | ternary('latest','present') }}"
+   state: "latest"
  with_items: "{{ manifests.results }}"
- failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
  when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled

  - name: Helm | Install/upgrade helm
@ -1,3 +1,4 @@
+ ---
  kind: ClusterRoleBinding
  apiVersion: rbac.authorization.k8s.io/v1beta1
  metadata:

@ -1,3 +1,4 @@
+ ---
  apiVersion: v1
  kind: ServiceAccount
  metadata:

@ -1,3 +1,4 @@
+ ---
  dependencies:
  - role: download
  file: "{{ downloads.netcheck_server }}"
roles/kubernetes-apps/network_plugin/calico/tasks/main.yml (new file, +11)

@ -0,0 +1,11 @@
+ ---
+ - name: Start Calico resources
+   kube:
+     name: "{{item.item.name}}"
+     namespace: "{{ system_namespace }}"
+     kubectl: "{{bin_dir}}/kubectl"
+     resource: "{{item.item.type}}"
+     filename: "{{kube_config_dir}}/{{item.item.file}}"
+     state: "latest"
+   with_items: "{{ calico_node_manifests.results }}"
+   when: inventory_hostname == groups['kube-master'][0] and not item|skipped
@ -1,32 +1,11 @@
- - name: Create canal ConfigMap
-   run_once: true
+ ---
+ - name: Canal | Start Resources
  kube:
-     name: "canal-config"
+     name: "{{item.item.name}}"
+     namespace: "{{ system_namespace }}"
  kubectl: "{{bin_dir}}/kubectl"
-     filename: "{{kube_config_dir}}/canal-config.yaml"
-     resource: "configmap"
-     namespace: "{{system_namespace}}"
+     resource: "{{item.item.type}}"
+     filename: "{{kube_config_dir}}/{{item.item.file}}"
+     state: "latest"
+   with_items: "{{ canal_manifests.results }}"
+   when: inventory_hostname == groups['kube-master'][0] and not item|skipped
-
- #FIXME: remove if kubernetes/features#124 is implemented
- - name: Purge old flannel and canal-node
-   run_once: true
-   kube:
-     name: "canal-node"
-     kubectl: "{{ bin_dir }}/kubectl"
-     filename: "{{ kube_config_dir }}/canal-node.yaml"
-     resource: "ds"
-     namespace: "{{system_namespace}}"
-     state: absent
-   when: inventory_hostname == groups['kube-master'][0] and canal_node_manifest.changed
-
- - name: Start flannel and calico-node
-   run_once: true
-   kube:
-     name: "canal-node"
-     kubectl: "{{bin_dir}}/kubectl"
-     filename: "{{kube_config_dir}}/canal-node.yaml"
-     resource: "ds"
-     namespace: "{{system_namespace}}"
-     state: "{{ item | ternary('latest','present') }}"
-   with_items: "{{ canal_node_manifest.changed }}"
roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml (new file)
@ -0,0 +1,22 @@
+---
+- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml"
+  run_once: true
+  when: rbac_enabled and flannel_rbac_manifest.changed
+
+- name: Flannel | Start Resources
+  kube:
+    name: "kube-flannel"
+    kubectl: "{{ bin_dir }}/kubectl"
+    filename: "{{ kube_config_dir }}/cni-flannel.yml"
+    resource: "ds"
+    namespace: "{{system_namespace}}"
+    state: "latest"
+  with_items: "{{ flannel_manifest.changed }}"
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Flannel | Wait for flannel subnet.env file presence
+  wait_for:
+    path: /run/flannel/subnet.env
+    delay: 5
+    timeout: 600
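Worth flagging: `with_items: "{{ flannel_manifest.changed }}"` iterates over a single boolean, so the task runs exactly once with item bound to the changed flag. That loop made sense with the old `state: "{{ item | ternary('latest','present') }}"` idiom, but with state hard-coded to "latest" it appears vestigial. A tiny hypothetical demo of the old idiom (registered variable name invented):

# Hypothetical demo: looping over a boolean runs the task once,
# with `item` set to that boolean value.
- debug:
    msg: "state would be {{ item | ternary('latest', 'present') }}"
  with_items: "{{ some_registered_task.changed }}"   # one iteration; item == true/false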
@ -1,8 +1,14 @@
 ---
 dependencies:
+  - role: kubernetes-apps/network_plugin/calico
+    when: kube_network_plugin == 'calico'
+    tags: calico
   - role: kubernetes-apps/network_plugin/canal
     when: kube_network_plugin == 'canal'
     tags: canal
+  - role: kubernetes-apps/network_plugin/flannel
+    when: kube_network_plugin == 'flannel'
+    tags: flannel
   - role: kubernetes-apps/network_plugin/weave
     when: kube_network_plugin == 'weave'
    tags: weave
@ -1,4 +1,5 @@
-#FIXME: remove if kubernetes/features#124 is implemented
+---
+# FIXME: remove if kubernetes/features#124 is implemented
 - name: Weave | Purge old weave daemonset
   kube:
     name: "weave-net"
@ -9,7 +10,6 @@
     state: absent
   when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed
-

 - name: Weave | Start Resources
   kube:
     name: "weave-net"
@ -17,11 +17,9 @@
     filename: "{{ kube_config_dir }}/weave-net.yml"
     resource: "ds"
     namespace: "{{system_namespace}}"
-    state: "{{ item | ternary('latest','present') }}"
-  with_items: "{{ weave_manifest.changed }}"
+    state: "latest"
   when: inventory_hostname == groups['kube-master'][0]

 - name: "Weave | wait for weave to become available"
   uri:
     url: http://127.0.0.1:6784/status
@ -1,3 +1,4 @@
+---
 # Limits for calico apps
 calico_policy_controller_cpu_limit: 100m
 calico_policy_controller_memory_limit: 256M
@ -7,3 +8,8 @@ calico_policy_controller_memory_requests: 64M
 # SSL
 calico_cert_dir: "/etc/calico/certs"
 canal_cert_dir: "/etc/canal/certs"
+
+rbac_resources:
+  - sa
+  - clusterrole
+  - clusterrolebinding
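The new rbac_resources list is consumed by the policy-controller tasks below through the condition `rbac_enabled or item.type not in rbac_resources`: RBAC-only manifests are rendered solely when RBAC is on, while everything else always renders. A hedged sketch of that gate in isolation (manifest names invented):

# Sketch only - demonstrates the gating condition, not the real task list.
- name: Render manifests, skipping RBAC ones when RBAC is off
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: demo, file: demo-rs.yml, type: rs}   # always rendered
    - {name: demo, file: demo-sa.yml, type: sa}   # rendered only if rbac_enabled
  when: rbac_enabled or item.type not in rbac_resources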
@ -1,21 +1,49 @@
-- set_fact:
+---
+- name: Set cert dir
+  set_fact:
     calico_cert_dir: "{{ canal_cert_dir }}"
   when: kube_network_plugin == 'canal'
   tags: [facts, canal]

-- name: Write calico-policy-controller yaml
+- name: Get calico-policy-controller version if running
+  shell: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d':' -f2"
+  register: existing_calico_policy_version
+  run_once: true
+  failed_when: false
+
+# FIXME(mattymo): This should not be necessary
+- name: Delete calico-policy-controller if an old one is installed
+  kube:
+    name: calico-policy-controller
+    kubectl: "{{bin_dir}}/kubectl"
+    resource: rs
+    namespace: "{{ system_namespace }}"
+    state: absent
+  run_once: true
+  when:
+    - not "NotFound" in existing_calico_policy_version.stderr
+    - existing_calico_policy_version.stdout | version_compare('v0.7.0', '<')
+
+- name: Create calico-policy-controller manifests
   template:
-    src: calico-policy-controller.yml.j2
-    dest: "{{kube_config_dir}}/calico-policy-controller.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags: canal
+    src: "{{item.file}}.j2"
+    dest: "{{kube_config_dir}}/{{item.file}}"
+  with_items:
+    - {name: calico-policy-controller, file: calico-policy-controller.yml, type: rs}
+    - {name: calico-policy-controller, file: calico-policy-sa.yml, type: sa}
+    - {name: calico-policy-controller, file: calico-policy-cr.yml, type: clusterrole}
+    - {name: calico-policy-controller, file: calico-policy-crb.yml, type: clusterrolebinding}
+  register: calico_policy_manifests
+  when:
+    - rbac_enabled or item.type not in rbac_resources

 - name: Start of Calico policy controller
   kube:
-    name: "calico-policy-controller"
+    name: "{{item.item.name}}"
+    namespace: "{{ system_namespace }}"
     kubectl: "{{bin_dir}}/kubectl"
-    filename: "{{kube_config_dir}}/calico-policy-controller.yml"
-    namespace: "{{system_namespace}}"
-    resource: "rs"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags: canal
+    resource: "{{item.item.type}}"
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    state: "latest"
+  with_items: "{{ calico_policy_manifests.results }}"
+  when: inventory_hostname == groups['kube-master'][0] and not item|skipped
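The purge task above keys off Ansible's version_compare filter; a minimal, hypothetical illustration of its semantics (version values invented):

# Hypothetical demo of version_compare as used above.
- debug:
    msg: "old policy controller found - purge it"
  when: "'v0.6.3' | version_compare('v0.7.0', '<')"   # true, since v0.6.3 < v0.7.0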
@ -15,12 +15,18 @@ spec:
   template:
     metadata:
       name: calico-policy-controller
-      namespace: {{system_namespace}}
+      namespace: {{ system_namespace }}
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-policy
     spec:
       hostNetwork: true
+{% if rbac_enabled %}
+      serviceAccountName: calico-policy-controller
+{% endif %}
+      tolerations:
+        - effect: NoSchedule
+          operator: Exists
       containers:
         - name: calico-policy-controller
           image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}
@ -0,0 +1,17 @@
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+  namespace: {{ system_namespace }}
+rules:
+  - apiGroups:
+    - ""
+    - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+    verbs:
+      - watch
+      - list
@ -0,0 +1,13 @@
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-policy-controller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-policy-controller
+subjects:
+- kind: ServiceAccount
+  name: calico-policy-controller
+  namespace: {{ system_namespace }}
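Once the binding is applied, one way to smoke-test it is kubectl's impersonation support (`kubectl auth can-i`). The task below is a sketch, not part of this change; it assumes the same bin_dir and system_namespace variables used elsewhere in the diff:

# Sketch only: verify the ServiceAccount can list pods through the new binding.
- name: Verify calico-policy-controller RBAC
  command: >
    {{ bin_dir }}/kubectl auth can-i list pods
    --as=system:serviceaccount:{{ system_namespace }}:calico-policy-controller
  register: rbac_check
  changed_when: false
  failed_when: "'yes' not in rbac_check.stdout"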