Merge pull request #1 from kubernetes-incubator/master

test
This commit is contained in:
caiwenhao 2017-09-12 22:39:50 +08:00 committed by GitHub
commit 0f77b76ef1
222 changed files with 2175 additions and 1648 deletions

.gitignore vendored
View file

@@ -24,6 +24,7 @@ __pycache__/
.Python
env/
build/
credentials/
develop-eggs/
dist/
downloads/

View file

@@ -18,10 +18,7 @@ variables:
# us-west1-a
before_script:
- pip install ansible==2.3.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- pip install -r tests/requirements.txt
- mkdir -p /.ssh
- cp tests/ansible.cfg .
@@ -75,10 +72,7 @@ before_script:
- $HOME/.cache
before_script:
- docker info
- pip install ansible==2.3.0
- pip install netaddr
- pip install apache-libcloud==0.20.1
- pip install boto==2.9.0
- pip install -r tests/requirements.txt
- mkdir -p /.ssh
- mkdir -p $HOME/.ssh
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
@@ -265,6 +259,7 @@ before_script:
# Test matrix. Leave the comments for markup scripts.
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
KUBE_NETWORK_PLUGIN: calico
CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817
CLOUD_REGION: us-west1-b
@@ -275,9 +270,10 @@ before_script:
##User-data to simply turn off coreos upgrades
STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd'
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables
# stage: deploy-gce-part1
KUBE_NETWORK_PLUGIN: canal
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_IMAGE: ubuntu-1604-xenial
CLOUD_REGION: europe-west1-b
CLUSTER_MODE: ha
@@ -370,6 +366,8 @@ before_script:
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1
AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }"
CLOUD_MACHINE_TYPE: "n1-standard-2"
KUBE_NETWORK_PLUGIN: canal
CERT_MGMT: vault
CLOUD_IMAGE: ubuntu-1604-xenial
@@ -451,24 +449,24 @@ ubuntu-weave-sep-triggers:
only: ['triggers']
# More builds for PRs/merges (manual) and triggers (auto)
ubuntu-canal-ha:
ubuntu-canal-ha-rbac:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
<<: *ubuntu_canal_ha_rbac_variables
when: manual
except: ['triggers']
only: ['master', /^pr-.*$/]
ubuntu-canal-ha-triggers:
ubuntu-canal-ha-rbac-triggers:
stage: deploy-gce-part1
<<: *job
<<: *gce
variables:
<<: *gce_variables
<<: *ubuntu_canal_ha_variables
<<: *ubuntu_canal_ha_rbac_variables
when: on_success
only: ['triggers']
@@ -642,6 +640,13 @@ syntax-check:
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
except: ['triggers', 'master']
yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
except: ['triggers', 'master']
tox-inventory-builder:
stage: unit-tests
<<: *job

.yamllint Normal file
View file

@@ -0,0 +1,16 @@
---
extends: default
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 1
brackets:
min-spaces-inside: 0
max-spaces-inside: 1
indentation:
spaces: 2
indent-sequences: consistent
line-length: disable
new-line-at-end-of-file: disable
truthy: disable
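The new `yamllint` CI job above runs `yamllint roles`; a minimal local sketch of the same check, assuming yamllint has been installed via pip and the command is run from the repository root so this `.yamllint` config is picked up automatically:

```
pip install yamllint       # same linter the CI job uses
yamllint roles             # lint all role YAML against the config above
yamllint -c .yamllint .    # or lint the whole tree with an explicit config path
```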

View file

@@ -53,13 +53,13 @@ Versions of supported components
--------------------------------
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.0.17 <br>
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3 <br>
[etcd](https://github.com/coreos/etcd/releases) v3.2.4 <br>
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0 <br>
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0 <br>
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
[weave](http://weave.works/) v2.0.1 <br>
[docker](https://www.docker.com/) v1.13.1 (see note)<br>
[docker](https://www.docker.com/) v1.13 (see note)<br>
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)<br>
Note: kubernetes doesn't support newer docker versions. Among other things, kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster, look into e.g. the yum versionlock plugin or apt pinning.
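For example, a minimal sketch of pinning Docker on a yum-based system (the package name `docker-engine` matches what this playbook installs; adjust to your distro):

```
yum install -y yum-plugin-versionlock   # provides the versionlock subcommand
yum versionlock add docker-engine       # freeze the currently installed version
yum versionlock list                    # confirm the lock is in place
```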

View file

@@ -25,16 +25,29 @@ export AWS_DEFAULT_REGION="zzz"
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
- Update `contrib/terraform/aws/terraform.tfvars` with your data
- Allocate new AWS Elastic IPs, depending on the number of Availability Zones used (2 for each AZ)
- Create an AWS EC2 SSH Key
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
- Create an AWS EC2 SSH Key
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
Example:
```commandline
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
```
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following:
```commandline
ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
```
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
Example (this one assumes you are using CoreOS)
```commandline
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
```
**Troubleshooting**
***Remaining AWS IAM Instance Profile***:

View file

@@ -162,7 +162,7 @@ resource "aws_instance" "k8s-worker" {
*/
data "template_file" "inventory" {
template = "${file("${path.module}/templates/inventory.tpl")}"
vars {
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
@@ -173,9 +173,9 @@ data "template_file" "inventory" {
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}"
loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}"
}
}
resource "null_resource" "inventories" {
@@ -183,4 +183,8 @@ resource "null_resource" "inventories" {
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
}
triggers {
template = "${data.template_file.inventory.rendered}"
}
}

View file

@@ -25,4 +25,4 @@ kube-master
[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
${kube_insecure_apiserver_address}
${loadbalancer_apiserver_address}

View file

@@ -5,11 +5,11 @@ aws_cluster_name = "devtest"
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
aws_avail_zones = ["us-west-2a","us-west-2b"]
#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"
aws_bastion_ami = "ami-db56b9a3"
aws_bastion_size = "t2.medium"
#Kubernetes Cluster
@@ -23,9 +23,10 @@ aws_etcd_size = "t2.medium"
aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"
aws_cluster_ami = "ami-903df7ff"
aws_cluster_ami = "ami-db56b9a3"
#Settings AWS ELB
aws_elb_api_port = 443
k8s_secure_api_port = 443
aws_elb_api_port = 6443
k8s_secure_api_port = 6443
kube_insecure_apiserver_address = "0.0.0.0"

View file

@@ -96,6 +96,6 @@ variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "kube_insecure_apiserver_address" {
description= "Bind Address for insecure Port of K8s API Server"
variable "loadbalancer_apiserver_address" {
description= "Bind Address for ELB of K8s API Server"
}

View file

@@ -23,13 +23,6 @@ ip a show dev flannel.1
valid_lft forever preferred_lft forever
```
* Docker must be configured with a bridge ip in the flannel subnet.
```
ps aux | grep docker
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
```
* Try to run a container and check its ip address
```

View file

@@ -57,7 +57,7 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
See more details in the [ansible guide](ansible.md).
Adding nodes
--------------------------
------------
You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
@@ -66,4 +66,38 @@ You may want to add worker nodes to your existing cluster. This can be done by r
```
ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
--private-key=~/.ssh/private_key
```
Connecting to Kubernetes
------------------------
By default, Kubespray configures kube-master hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use http://localhost:8080 to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](ha.md).
Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One could generate a kubeconfig based on one of the installed
kube-master hosts (needs improvement) or connect with a username and password.
By default, a user with admin rights is created, named `kube`.
The password can be viewed after deployment by looking at the file
`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated
password. If you wish to set your own password, just precreate/modify this
file yourself.
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
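As an illustration, a sketch of connecting remotely with the generated `kube` user; the master address is a placeholder, and `--insecure-skip-tls-verify` is used only because the cluster CA is not yet in your local trust store:

```
PASSWORD=$(cat credentials/kube_user)
kubectl --server=https://MASTER_IP:6443 \
        --username=kube --password="$PASSWORD" \
        --insecure-skip-tls-verify=true \
        get nodes
```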
Accessing Kubernetes Dashboard
------------------------------
If the variable `dashboard_enabled` is set (default is true), then you can
access the Kubernetes Dashboard at the following URL:
https://kube:_kube-password_@_host_:6443/ui/
To see the password, refer to the section above, titled *Connecting to
Kubernetes*. The host can be any kube-master or kube-node or loadbalancer
(when enabled).

View file

@@ -67,6 +67,8 @@ following default cluster parameters:
OpenStack (default is unset)
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
Kubernetes
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
alpha/experimental Kubernetes features. (default is `[]`)
* *authorization_modes* - A list of [authorization modes](
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
that the cluster should be configured for. Defaults to `[]` (i.e. no authorization).
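For instance, a sketch of setting both variables at deploy time via extra vars (the feature gate named here is purely illustrative):

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b \
  -e '{"kube_feature_gates": ["Accelerators=true"], "authorization_modes": ["RBAC"]}'
```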

View file

@@ -26,7 +26,6 @@ first task is to stop any temporary instances of Vault, to free the port for
the long-term. At the end of this task, the entire Vault cluster should be up
and ready to go.
Keys to the Kingdom
-------------------
@@ -44,30 +43,38 @@ to authenticate to almost everything in Kubernetes and decode all private
(HTTPS) traffic on your network signed by Vault certificates.
For even greater security, you may want to remove and store elsewhere any
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
Vault by default encrypts all traffic to and from the datastore backend, all
resting data, and uses TLS for its TCP listener. It is recommended that you
do not change the Vault config to disable TLS, unless you absolutely have to.
Usage
-----
To get the Vault role running, you must do two things at a minimum:
1. Assign the ``vault`` group to at least 1 node in your inventory
2. Change ``cert_management`` to be ``vault`` instead of ``script``
1. Change ``cert_management`` to be ``vault`` instead of ``script``
Nothing else is required, but customization is possible. Check
``roles/vault/defaults/main.yml`` for the different variables that can be
overridden, most common being ``vault_config``, ``vault_port``, and
``vault_deployment_type``.
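A minimal sketch of enabling the role at deploy time, assuming the defaults in ``roles/vault/defaults/main.yml`` are acceptable and a ``vault`` group exists in the inventory:

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b \
  -e cert_management=vault
```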
Also, if you intend to use a Root or Intermediate CA generated elsewhere,
you'll need to copy the certificate and key to the hosts in the vault group
prior to running the vault role. By default, they'll be located at
``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively.
As a result, the Vault role will create separate Root CAs for `etcd`,
`kubernetes`, and `vault`. Also, if you intend to use a Root or Intermediate CA
generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at:
* vault:
* ``/etc/vault/ssl/ca.pem``
* ``/etc/vault/ssl/ca-key.pem``
* etcd:
* ``/etc/ssl/etcd/ssl/ca.pem``
* ``/etc/ssl/etcd/ssl/ca-key.pem``
* kubernetes:
* ``/etc/kubernetes/ssl/ca.pem``
* ``/etc/kubernetes/ssl/ca-key.pem``
Additional Notes:
@@ -77,7 +84,6 @@ Additional Notes:
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
need to read in those credentials, if they want to interact with Vault.
Potential Work
--------------
@@ -87,6 +93,3 @@ Potential Work
- Add the ability to start temp Vault with Host, Rkt, or Docker
- Add a dynamic way to change out the backend role creation during Bootstrap,
so other services can be used (such as Consul)
- Segregate Server Cert generation from Auth Cert generation (separate CAs).
This work was partially started with the `auth_cert_backend` tasks, but would
need to be further applied to all roles (particularly Etcd and Kubernetes).

View file

@@ -40,23 +40,18 @@ kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
# Optionally add groups for user
kube_api_pwd: "changeme"
kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}"
kube_users:
kube:
pass: "{{kube_api_pwd}}"
role: admin
root:
pass: "{{kube_api_pwd}}"
role: admin
# groups:
# - system:masters
groups:
- system:masters
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
#kube_oidc_auth: false
#kube_basic_auth: false
#kube_token_auth: false
#kube_basic_auth: true
#kube_token_auth: true
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
@@ -148,6 +143,9 @@ vault_deployment_type: docker
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
# Kubernetes dashboard (available at https://first_master:6443/ui by default)
dashboard_enabled: true
# Monitoring apps for k8s
efk_enabled: false

View file

@@ -139,7 +139,7 @@ class KubeManager(object):
if check and self.exists():
return []
cmd = ['create']
cmd = ['apply']
if not self.filename:
self.module.fail_json(msg='filename required to create')
@@ -150,10 +150,7 @@ class KubeManager(object):
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
cmd = ['apply']
if self.force:
cmd.append('--force')
@@ -270,9 +267,8 @@ def main():
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
result = manager.create(check=False)
elif state == 'absent':
result = manager.delete()
@@ -284,11 +280,7 @@ def main():
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
result = manager.replace()
else:
module.fail_json(msg='Unrecognized state %s.' % state)
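The switch from `create`/`replace` to `apply` above relies on apply's idempotent create-or-update behaviour; a command-line sketch (the manifest path is a placeholder):

```
kubectl apply -f /etc/kubernetes/dnsmasq-deploy.yml   # creates the object if absent
kubectl apply -f /etc/kubernetes/dnsmasq-deploy.yml   # re-running updates in place instead of failing
```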

View file

@@ -16,6 +16,6 @@ Host {{ bastion_ip }}
ControlPersist 5m
Host {{ vars['hosts'] }}
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }}
ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}
StrictHostKeyChecking no
{% endif %}

View file

@@ -49,4 +49,3 @@
pip:
name: "{{ item }}"
with_items: "{{pip_python_modules}}"

View file

@@ -21,10 +21,20 @@
- name: Gather nodes hostnames
setup:
gather_subset: '!all'
filter: ansible_hostname
filter: ansible_*
- name: Assign inventory name to unconfigured hostnames
- name: Assign inventory name to unconfigured hostnames (non-CoreOS)
hostname:
name: "{{inventory_hostname}}"
when: ansible_hostname == 'localhost'
when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS']
- name: Assign inventory name to unconfigured hostnames (CoreOS only)
command: "hostnamectl set-hostname {{inventory_hostname}}"
register: hostname_changed
when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
- name: Update hostname fact (CoreOS only)
setup:
gather_subset: '!all'
filter: ansible_hostname
when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed

View file

@@ -6,4 +6,3 @@
regexp: '^\w+\s+requiretty'
dest: /etc/sudoers
state: absent

View file

@@ -4,12 +4,12 @@
# Max of 4 names is allowed and no more than 256 - 17 chars total
# (a 2 is reserved for the 'default.svc.' and'svc.')
#searchdomains:
# - foo.bar.lc
# searchdomains:
# - foo.bar.lc
# Max of 2 is allowed here (a 1 is reserved for the dns_server)
#nameservers:
# - 127.0.0.1
# nameservers:
# - 127.0.0.1
dns_forward_max: 150
cache_size: 1000

View file

@@ -1,6 +1,4 @@
---
- include: pre_upgrade.yml
- name: ensure dnsmasq.d directory exists
file:
path: /etc/dnsmasq.d
@@ -56,6 +54,26 @@
dest: /etc/dnsmasq.d/01-kube-dns.conf
state: link
- name: Create dnsmasq RBAC manifests
template:
src: "{{ item }}"
dest: "{{ kube_config_dir }}/{{ item }}"
with_items:
- "dnsmasq-clusterrolebinding.yml"
- "dnsmasq-serviceaccount.yml"
when: rbac_enabled
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Apply dnsmasq RBAC manifests
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }}"
with_items:
- "dnsmasq-clusterrolebinding.yml"
- "dnsmasq-serviceaccount.yml"
when: rbac_enabled
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Create dnsmasq manifests
template:
src: "{{item.file}}"
@@ -63,7 +81,7 @@
with_items:
- {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment}
- {name: dnsmasq, file: dnsmasq-svc.yml, type: svc}
- {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml, type: deployment}
- {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml.j2, type: deployment}
register: manifests
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
@@ -75,7 +93,7 @@
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
state: "latest"
with_items: "{{ manifests.results }}"
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
@@ -86,4 +104,3 @@
port: 53
timeout: 180
when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
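Once the RBAC manifests above have been applied, the objects can be checked from the first master; a sketch, assuming `system_namespace` is the usual `kube-system`:

```
kubectl -n kube-system get serviceaccount dnsmasq
kubectl get clusterrolebinding dnsmasq
```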

View file

@@ -1,9 +0,0 @@
---
- name: Delete legacy dnsmasq daemonset
kube:
name: dnsmasq
namespace: "{{system_namespace}}"
kubectl: "{{bin_dir}}/kubectl"
resource: "ds"
state: absent
when: inventory_hostname == groups['kube-master'][0]

View file

@@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -30,21 +31,26 @@ spec:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
{% if rbac_enabled %}
serviceAccountName: dnsmasq
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
resources:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=dnsmasq-autoscaler
- --target=Deployment/dnsmasq
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
- --logtostderr=true
- --v={{ kube_log_level }}
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=dnsmasq-autoscaler
- --target=Deployment/dnsmasq
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
- --logtostderr=true
- --v={{ kube_log_level }}

View file

@@ -0,0 +1,14 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: dnsmasq
namespace: "{{ system_namespace }}"
subjects:
- kind: ServiceAccount
name: dnsmasq
namespace: "{{ system_namespace }}"
roleRef:
kind: ClusterRole
name: cluster-admin
apiGroup: rbac.authorization.k8s.io

View file

@@ -21,6 +21,9 @@ spec:
kubernetes.io/cluster-service: "true"
kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}"
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: dnsmasq
image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}"
@@ -35,7 +38,6 @@ spec:
capabilities:
add:
- NET_ADMIN
imagePullPolicy: IfNotPresent
resources:
limits:
cpu: {{ dns_cpu_limit }}
@@ -55,7 +57,6 @@ spec:
mountPath: /etc/dnsmasq.d
- name: etcdnsmasqdavailable
mountPath: /etc/dnsmasq.d-available
volumes:
- name: etcdnsmasqd
hostPath:
@@ -64,4 +65,3 @@ spec:
hostPath:
path: /etc/dnsmasq.d-available
dnsPolicy: Default # Don't use cluster DNS.

View file

@@ -0,0 +1,8 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: dnsmasq
namespace: "{{ system_namespace }}"
labels:
kubernetes.io/cluster-service: "true"

View file

@@ -1,3 +1,4 @@
---
docker_version: '1.13'
docker_package_info:
@@ -10,3 +11,6 @@ docker_repo_info:
repos:
docker_dns_servers_strict: yes
docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
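Exposing the repo URL and GPG key as variables (added above) lets air-gapped installs point at an internal mirror; a sketch using a hypothetical mirror host:

```
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b \
  -e docker_rh_repo_base_url='https://mirror.example.com/docker/repo/main/centos/7' \
  -e docker_rh_repo_gpgkey='https://mirror.example.com/docker/gpg'
```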

View file

@@ -8,7 +8,7 @@
- Docker | pause while Docker restarts
- Docker | wait for docker
- name : Docker | reload systemd
- name: Docker | reload systemd
shell: systemctl daemon-reload
- name: Docker | reload docker.socket

View file

@@ -3,14 +3,14 @@
include_vars: "{{ item }}"
with_first_found:
- files:
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml"
- "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml"
- "{{ ansible_distribution|lower }}.yml"
- "{{ ansible_os_family|lower }}.yml"
- defaults.yml
paths:
- ../vars
- ../vars
skip: true
tags: facts

View file

@@ -48,7 +48,7 @@
- name: add system search domains to docker options
set_fact:
docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}"
when: system_search_domains.stdout != ""
- name: check number of nameservers
fail:

View file

@@ -10,11 +10,18 @@
dest: /etc/systemd/system/docker.service.d/http-proxy.conf
when: http_proxy is defined or https_proxy is defined or no_proxy is defined
- name: get systemd version
command: rpm -q --qf '%{V}\n' systemd
register: systemd_version
when: ansible_os_family == "RedHat" and not is_atomic
changed_when: false
- name: Write docker.service systemd file
template:
src: docker.service.j2
dest: /etc/systemd/system/docker.service
register: docker_service_file
notify: restart docker
when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
- name: Write docker.service systemd file for atomic

View file

@@ -1,3 +1,3 @@
[Service]
Environment="DOCKER_OPTS={{ docker_options | default('') }} \
--iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}"
--iptables=false"

View file

@@ -24,7 +24,9 @@ ExecStart={{ docker_bin_dir }}/docker daemon \
$DOCKER_NETWORK_OPTIONS \
$DOCKER_DNS_OPTIONS \
$INSECURE_REGISTRY
{% if ansible_os_family == "RedHat" and systemd_version.stdout|int >= 226 %}
TasksMax=infinity
{% endif %}
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity

View file

@@ -1,7 +1,7 @@
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
baseurl={{ docker_rh_repo_base_url }}
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
gpgkey={{ docker_rh_repo_gpgkey }}
{% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %}

View file

@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '3.10'
# https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist

View file

@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'
# versioning: docker-io itself is pinned at docker 1.5

View file

@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'
# https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package

View file

@@ -1,3 +1,4 @@
---
docker_kernel_min_version: '0'
# https://yum.dockerproject.org/repo/main/centos/7/Packages/
@@ -8,7 +9,7 @@ docker_versioned_pkg:
'1.12': docker-engine-1.12.6-1.el7.centos
'1.13': docker-engine-1.13.1-1.el7.centos
'stable': docker-engine-17.03.0.ce-1.el7.centos
'edge': docker-engine-17.03.0.ce-1.el7.centos
# https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package
# https://download.docker.com/linux/centos/7/x86_64/stable/Packages/

View file

@@ -20,20 +20,22 @@ download_always_pull: False
# Versions
kube_version: v1.7.3
etcd_version: v3.2.4
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v1.1.3"
calico_cni_version: "v1.8.0"
calico_policy_version: "v0.5.4"
calico_version: "v2.5.0"
calico_ctl_version: "v1.5.0"
calico_cni_version: "v1.10.0"
calico_policy_version: "v0.7.0"
weave_version: 2.0.1
flannel_version: v0.8.0
flannel_version: "v0.8.0"
flannel_cni_version: "v0.2.0"
pod_infra_version: 3.0
# Download URL's
etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
# Checksums
etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b"
# Containers
# Possible values: host, docker
@@ -42,13 +44,15 @@ etcd_image_repo: "quay.io/coreos/etcd"
etcd_image_tag: "{{ etcd_version }}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
calicoctl_image_repo: "calico/ctl"
calicoctl_image_tag: "{{ calico_version }}"
calico_node_image_repo: "calico/node"
flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
flannel_cni_image_tag: "{{ flannel_cni_version }}"
calicoctl_image_repo: "quay.io/calico/ctl"
calicoctl_image_tag: "{{ calico_ctl_version }}"
calico_node_image_repo: "quay.io/calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "calico/cni"
calico_cni_image_repo: "quay.io/calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "calico/kube-policy-controller"
calico_policy_image_repo: "quay.io/calico/kube-policy-controller"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "v0.3.0"
@@ -56,6 +60,8 @@ hyperkube_image_repo: "quay.io/coreos/hyperkube"
hyperkube_image_tag: "{{ kube_version }}_coreos.0"
pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "xueshanf/install-socat"
install_socat_image_tag: "latest"
netcheck_version: "v1.0"
netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent"
netcheck_agent_tag: "{{ netcheck_version }}"
@@ -137,6 +143,12 @@ downloads:
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
flannel_cni:
container: true
repo: "{{ flannel_cni_image_repo }}"
tag: "{{ flannel_cni_image_tag }}"
sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
enabled: "{{ kube_network_plugin == 'flannel' }}"
calicoctl:
container: true
repo: "{{ calicoctl_image_repo }}"
@@ -184,6 +196,11 @@ downloads:
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
install_socat:
container: true
repo: "{{ install_socat_image_repo }}"
tag: "{{ install_socat_image_tag }}"
sha256: "{{ install_socat_digest_checksum|default(None) }}"
nginx:
container: true
repo: "{{ nginx_image_repo }}"

View file

@@ -1,12 +1,5 @@
---
- name: downloading...
debug:
msg: "{{ download.url }}"
when:
- download.enabled|bool
- not download.container|bool
- name: Create dest directories
- name: file_download | Create dest directories
file:
path: "{{local_release_dir}}/{{download.dest|dirname}}"
state: directory
@@ -16,7 +9,7 @@
- not download.container|bool
tags: bootstrap-os
- name: Download items
- name: file_download | Download item
get_url:
url: "{{download.url}}"
dest: "{{local_release_dir}}/{{download.dest}}"
@@ -31,7 +24,7 @@
- download.enabled|bool
- not download.container|bool
- name: Extract archives
- name: file_download | Extract archives
unarchive:
src: "{{ local_release_dir }}/{{download.dest}}"
dest: "{{ local_release_dir }}/{{download.dest|dirname}}"
@@ -41,10 +34,9 @@
when:
- download.enabled|bool
- not download.container|bool
- download.unarchive is defined
- download.unarchive == True
- download.unarchive|default(False)
- name: Fix permissions
- name: file_download | Fix permissions
file:
state: file
path: "{{local_release_dir}}/{{download.dest}}"
@@ -56,10 +48,11 @@
- (download.unarchive is not defined or download.unarchive == False)
- set_fact:
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
download_delegate: "{% if download_localhost|bool %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
run_once: true
tags: facts
- name: Create dest directory for saved/loaded container images
- name: container_download | Create dest directory for saved/loaded container images
file:
path: "{{local_release_dir}}/containers"
state: directory
@@ -72,15 +65,14 @@
tags: bootstrap-os
# This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes
- name: Hack python binary path for localhost
- name: container_download | Hack python binary path for localhost
raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python"
when: download_delegate == 'localhost'
delegate_to: localhost
when: download_delegate == 'localhost'
failed_when: false
run_once: true
tags: localhost
- name: Download | create local directory for saved/loaded container images
- name: container_download | create local directory for saved/loaded container images
file:
path: "{{local_release_dir}}/containers"
state: directory
@@ -95,24 +87,16 @@
- download_delegate == 'localhost'
tags: localhost
- name: Make download decision if pull is required by tag or sha256
- name: container_download | Make download decision if pull is required by tag or sha256
include: set_docker_image_facts.yml
when:
- download.enabled|bool
- download.container|bool
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
delegate_to: "{{ download_delegate if download_run_once|bool else omit }}"
run_once: "{{ download_run_once|bool }}"
tags: facts
- name: pulling...
debug:
msg: "{{ pull_args }}"
when:
- download.enabled|bool
- download.container|bool
#NOTE(bogdando) this brings no docker-py deps for nodes
- name: Download containers if pull is required or told to always pull
- name: container_download | Download containers if pull is required or told to always pull
command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}"
register: pull_task_result
until: pull_task_result|succeeded
@@ -122,29 +106,29 @@
- download.enabled|bool
- download.container|bool
- pull_required|bool|default(download_always_pull)
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
delegate_to: "{{ download_delegate if download_run_once|bool else omit }}"
run_once: "{{ download_run_once|bool }}"
- set_fact:
fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
run_once: true
tags: facts
- name: "Set default value for 'container_changed' to false"
- name: "container_download | Set default value for 'container_changed' to false"
set_fact:
container_changed: "{{pull_required|default(false)|bool}}"
- name: "Update the 'container_changed' fact"
- name: "container_download | Update the 'container_changed' fact"
set_fact:
container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}"
when:
- download.enabled|bool
- download.container|bool
- pull_required|bool|default(download_always_pull)
delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}"
run_once: "{{ download_run_once|bool }}"
tags: facts
- name: Stat saved container image
- name: container_download | Stat saved container image
stat:
path: "{{fname}}"
register: img
@@ -158,7 +142,7 @@
run_once: true
tags: facts
- name: Download | save container images
- name: container_download | save container images
shell: "{{ docker_bin_dir }}/docker save {{ pull_args }} | gzip -{{ download_compress }} > {{ fname }}"
delegate_to: "{{ download_delegate }}"
register: saved
@@ -170,7 +154,7 @@
- download.container|bool
- (container_changed|bool or not img.stat.exists)
- name: Download | copy container images to ansible host
- name: container_download | copy container images to ansible host
synchronize:
src: "{{ fname }}"
dest: "{{ fname }}"
@@ -186,7 +170,7 @@
- download.container|bool
- saved.changed
- name: Download | upload container images to nodes
- name: container_download | upload container images to nodes
synchronize:
src: "{{ fname }}"
dest: "{{ fname }}"
@@ -206,7 +190,7 @@
- download.container|bool
tags: [upload, upgrade]
- name: Download | load container images
- name: container_download | load container images
shell: "{{ docker_bin_dir }}/docker load < {{ fname }}"
when:
- (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and

View file

@@ -9,25 +9,20 @@
- name: Register docker images info
raw: >-
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}"
{{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ','
no_log: true
register: docker_images_raw
register: docker_images
failed_when: false
changed_when: false
check_mode: no
when: not download_always_pull|bool
- set_fact:
docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}"
no_log: true
when: not download_always_pull|bool
- set_fact:
pull_required: >-
{%- if pull_args in docker_images.split(',') %}false{%- else -%}true{%- endif -%}
{%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%}
when: not download_always_pull|bool
- name: Check the local digest sha256 corresponds to the given image tag
assert:
that: "{{download.repo}}:{{download.tag}} in docker_images.split(',')"
that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')"
when: not download_always_pull|bool and not pull_required|bool and pull_by_digest|bool

View file

@@ -21,8 +21,10 @@ etcd_metrics: "basic"
etcd_memory_limit: 512M
# Uncomment to set CPU share for etcd
#etcd_cpu_limit: 300m
# etcd_cpu_limit: 300m
etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"
etcd_compaction_retention: "8"
etcd_vault_mount_path: etcd

View file

@@ -5,6 +5,7 @@
- Refresh Time Fact
- Set Backup Directory
- Create Backup Directory
- Stat etcd v2 data directory
- Backup etcd v2 data
- Backup etcd v3 data
when: etcd_cluster_is_healthy.rc == 0
@@ -24,7 +25,13 @@
group: root
mode: 0600
- name: Stat etcd v2 data directory
stat:
path: "{{ etcd_data_dir }}/member"
register: etcd_data_dir_member
- name: Backup etcd v2 data
when: etcd_data_dir_member.stat.exists
command: >-
{{ bin_dir }}/etcdctl backup
--data-dir {{ etcd_data_dir }}
@@ -43,4 +50,3 @@
ETCDCTL_API: 3
retries: 3
delay: "{{ retry_stagger | random + 3 }}"
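For reference, a sketch of the two backup commands the tasks above wrap; the v2 backup only makes sense when a `member` directory exists, and both the data dir and the backup destinations are placeholders:

```
# etcd v2 keyspace: copy the data dir into a backup dir
etcdctl backup --data-dir /var/lib/etcd --backup-dir /var/backups/etcd-v2
# etcd v3 keyspace: take a snapshot through the v3 API
ETCDCTL_API=3 etcdctl snapshot save /var/backups/etcd-v3/snapshot.db
```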

View file

@@ -30,4 +30,3 @@
- name: set etcd_secret_changed
set_fact:
etcd_secret_changed: true

View file

@@ -66,4 +66,3 @@
{%- set _ = certs.update({'sync': True}) -%}
{% endif %}
{{ certs.sync }}

View file

@@ -73,11 +73,10 @@
'member-{{ node }}-key.pem',
{% endfor %}]"
my_master_certs: ['ca-key.pem',
'admin-{{ inventory_hostname }}.pem',
'admin-{{ inventory_hostname }}-key.pem',
'member-{{ inventory_hostname }}.pem',
'member-{{ inventory_hostname }}-key.pem'
]
'admin-{{ inventory_hostname }}.pem',
'admin-{{ inventory_hostname }}-key.pem',
'member-{{ inventory_hostname }}.pem',
'member-{{ inventory_hostname }}-key.pem']
all_node_certs: "['ca.pem',
{% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
'node-{{ node }}.pem',
@@ -111,22 +110,22 @@
sync_certs|default(false) and inventory_hostname not in groups['etcd']
notify: set etcd_secret_changed
#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
#char limit when using shell command
#FIXME(mattymo): Use tempfile module in ansible 2.3
- name: Gen_certs | Prepare tempfile for unpacking certs
shell: mktemp /tmp/certsXXXXX.tar.gz
register: cert_tempfile
when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
inventory_hostname != groups['etcd'][0]
# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
# char limit when using shell command
- name: Gen_certs | Write master certs to tempfile
copy:
content: "{{etcd_master_cert_data.stdout}}"
dest: "{{cert_tempfile.stdout}}"
owner: root
mode: "0600"
# FIXME(mattymo): Use tempfile module in ansible 2.3
- name: Gen_certs | Prepare tempfile for unpacking certs
shell: mktemp /tmp/certsXXXXX.tar.gz
register: cert_tempfile
when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
inventory_hostname != groups['etcd'][0]
- name: Gen_certs | Write master certs to tempfile
copy:
content: "{{etcd_master_cert_data.stdout}}"
dest: "{{cert_tempfile.stdout}}"
owner: root
mode: "0600"
when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
inventory_hostname != groups['etcd'][0]
@@ -162,30 +161,3 @@
owner: kube
mode: "u=rwX,g-rwx,o-rwx"
recurse: yes
- name: Gen_certs | target ca-certificate store file
set_fact:
ca_cert_path: |-
{% if ansible_os_family == "Debian" -%}
/usr/local/share/ca-certificates/etcd-ca.crt
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors/etcd-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/etcd-ca.pem
{%- endif %}
tags: facts
- name: Gen_certs | add CA to trusted CA dir
copy:
src: "{{ etcd_cert_dir }}/ca.pem"
dest: "{{ ca_cert_path }}"
remote_src: true
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

View file

@@ -7,52 +7,14 @@
when: inventory_hostname in etcd_node_cert_hosts
tags: etcd-secrets
- name: gen_certs_vault | Read in the local credentials
command: cat /etc/vault/roles/etcd/userpass
register: etcd_vault_creds_cat
delegate_to: "{{ groups['vault'][0] }}"
- name: gen_certs_vault | Set facts for read Vault Creds
set_fact:
etcd_vault_creds: "{{ etcd_vault_creds_cat.stdout|from_json }}"
delegate_to: "{{ groups['vault'][0] }}"
- name: gen_certs_vault | Log into Vault and obtain an token
uri:
url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ etcd_vault_creds.username }}"
headers:
Accept: application/json
Content-Type: application/json
method: POST
body_format: json
body:
password: "{{ etcd_vault_creds.password }}"
register: etcd_vault_login_result
delegate_to: "{{ groups['vault'][0] }}"
- name: gen_certs_vault | Set fact for vault_client_token
set_fact:
vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}"
run_once: true
- name: gen_certs_vault | Set fact for Vault API token
set_fact:
etcd_vault_headers:
Accept: application/json
Content-Type: application/json
X-Vault-Token: "{{ vault_client_token }}"
run_once: true
when: vault_client_token != ""
# Issue master certs to Etcd nodes
- include: ../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
issue_cert_alt_names: "{{ groups.etcd + ['localhost'] }}"
issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
issue_cert_file_group: "{{ etcd_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ etcd_vault_headers }}"
issue_cert_hosts: "{{ groups.etcd }}"
issue_cert_ip_sans: >-
[
@@ -67,6 +29,7 @@
issue_cert_path: "{{ item }}"
issue_cert_role: etcd
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
with_items: "{{ etcd_master_certs_needed|d([]) }}"
when: inventory_hostname in groups.etcd
notify: set etcd_secret_changed
@@ -74,11 +37,11 @@
# Issue node certs to everyone else
- include: ../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
issue_cert_alt_names: "{{ etcd_node_cert_hosts }}"
issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}"
issue_cert_file_group: "{{ etcd_cert_group }}"
issue_cert_file_owner: kube
issue_cert_headers: "{{ etcd_vault_headers }}"
issue_cert_hosts: "{{ etcd_node_cert_hosts }}"
issue_cert_ip_sans: >-
[
@@ -93,8 +56,7 @@
issue_cert_path: "{{ item }}"
issue_cert_role: etcd
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
with_items: "{{ etcd_node_certs_needed|d([]) }}"
when: inventory_hostname in etcd_node_cert_hosts
notify: set etcd_secret_changed

View file

@@ -1,5 +1,5 @@
---
#Plan A: no docker-py deps
# Plan A: no docker-py deps
- name: Install | Copy etcdctl binary from docker container
command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy;
{{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
@@ -11,22 +11,3 @@
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false
#Plan B: looks nicer, but requires docker-py on all hosts:
#- name: Install | Set up etcd-binarycopy container
# docker:
# name: etcd-binarycopy
# state: present
# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
# when: etcd_deployment_type == "docker"
#
#- name: Install | Copy etcdctl from etcd-binarycopy container
# command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl"
# when: etcd_deployment_type == "docker"
#
#- name: Install | Clean up etcd-binarycopy container
# docker:
# name: etcd-binarycopy
# state: absent
# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}"
# when: etcd_deployment_type == "docker"

View file

@@ -1,8 +1,4 @@
---
- include: pre_upgrade.yml
when: etcd_cluster_setup
tags: etcd-pre-upgrade
- include: check_certs.yml
when: cert_management == "script"
tags: [etcd-secrets, facts]
@@ -10,6 +6,14 @@
- include: "gen_certs_{{ cert_management }}.yml"
tags: etcd-secrets
- include: upd_ca_trust.yml
tags: etcd-secrets
- name: "Gen_certs | Get etcd certificate serials"
shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2"
register: "node-{{ inventory_hostname }}_serial"
when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort
- include: "install_{{ etcd_deployment_type }}.yml"
when: is_etcd_master
tags: upgrade

View file

@@ -1,59 +0,0 @@
- name: "Pre-upgrade | check for etcd-proxy unit file"
stat:
path: /etc/systemd/system/etcd-proxy.service
register: etcd_proxy_service_file
tags: facts
- name: "Pre-upgrade | check for etcd-proxy init script"
stat:
path: /etc/init.d/etcd-proxy
register: etcd_proxy_init_script
tags: facts
- name: "Pre-upgrade | stop etcd-proxy if service defined"
service:
name: etcd-proxy
state: stopped
when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False))
- name: "Pre-upgrade | remove etcd-proxy service definition"
file:
path: "{{ item }}"
state: absent
when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False))
with_items:
- /etc/systemd/system/etcd-proxy.service
- /etc/init.d/etcd-proxy
- name: "Pre-upgrade | find etcd-proxy container"
command: "{{ docker_bin_dir }}/docker ps -aq --filter 'name=etcd-proxy*'"
register: etcd_proxy_container
changed_when: false
failed_when: false
- name: "Pre-upgrade | remove etcd-proxy if it exists"
command: "{{ docker_bin_dir }}/docker rm -f {{item}}"
with_items: "{{etcd_proxy_container.stdout_lines}}"
- name: "Pre-upgrade | see if etcdctl is installed"
stat:
path: "{{ bin_dir }}/etcdctl"
register: etcdctl_installed
- name: "Pre-upgrade | check if member list is non-SSL"
command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list"
register: etcd_member_list
retries: 10
delay: 3
until: etcd_member_list.rc != 2
run_once: true
when: etcdctl_installed.stat.exists
changed_when: false
failed_when: false
- name: "Pre-upgrade | change peer names to SSL"
shell: >-
{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list |
awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash
run_once: true
when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout'

View file

@@ -1,7 +1,7 @@
---
- name: Refresh config | Create etcd config file
template:
src: etcd.env.yml
src: etcd.env.j2
dest: /etc/etcd.env
notify: restart etcd
when: is_etcd_master

View file

@@ -1,7 +1,7 @@
---
- name: sync_etcd_master_certs | Create list of master certs needing creation
set_fact:
etcd_master_cert_list: >-
{{ etcd_master_cert_list|default([]) + [
"admin-" + item + ".pem",
@@ -11,7 +11,7 @@
run_once: true
- include: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: "{{ groups.etcd }}"

View file

@@ -1,12 +1,12 @@
---
- name: sync_etcd_node_certs | Create list of node certs needing creation
set_fact:
etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + item + '.pem'] }}"
with_items: "{{ etcd_node_cert_hosts }}"
- include: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: "{{ item }}"
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: "{{ etcd_node_cert_hosts }}"
@@ -24,7 +24,7 @@
sync_file_results: []
- include: ../../vault/tasks/shared/sync_file.yml
vars:
sync_file: ca.pem
sync_file_dir: "{{ etcd_cert_dir }}"
sync_file_hosts: "{{ etcd_node_cert_hosts }}"

View file

@@ -0,0 +1,27 @@
---
- name: Gen_certs | target ca-certificate store file
set_fact:
ca_cert_path: |-
{% if ansible_os_family == "Debian" -%}
/usr/local/share/ca-certificates/etcd-ca.crt
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors/etcd-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/etcd-ca.pem
{%- endif %}
tags: facts
- name: Gen_certs | add CA to trusted CA dir
copy:
src: "{{ etcd_cert_dir }}/ca.pem"
dest: "{{ ca_cert_path }}"
remote_src: true
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS)
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

View file

@@ -1,9 +1,8 @@
---
elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org'
elrepo_rpm : elrepo-release-7.0-3.el7.elrepo.noarch.rpm
elrepo_mirror : http://www.elrepo.org
elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm
elrepo_mirror: http://www.elrepo.org
elrepo_url : '{{elrepo_mirror}}/{{elrepo_rpm}}'
elrepo_url: '{{elrepo_mirror}}/{{elrepo_rpm}}'
elrepo_kernel_package: "kernel-lt"

View file

@@ -1,5 +1,6 @@
---
# Versions
kubedns_version : 1.14.2
kubedns_version: 1.14.2
kubednsautoscaler_version: 1.1.1
# Limits for dnsmasq/kubedns apps
@@ -37,6 +38,17 @@ netchecker_server_memory_limit: 256M
netchecker_server_cpu_requests: 50m
netchecker_server_memory_requests: 64M
# Dashboard
dashboard_enabled: false
dashboard_image_repo: kubernetesdashboarddev/kubernetes-dashboard-amd64
dashboard_image_tag: head
# Limits for dashboard
dashboard_cpu_limit: 100m
dashboard_memory_limit: 256M
dashboard_cpu_requests: 50m
dashboard_memory_requests: 64M
# SSL
etcd_cert_dir: "/etc/ssl/etcd/ssl"
canal_cert_dir: "/etc/canal/certs"

View file

@@ -0,0 +1,20 @@
---
- name: Kubernetes Apps | Lay down dashboard template
template:
src: "{{item.file}}"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {file: dashboard.yml.j2, type: deploy, name: kubernetes-dashboard}
register: manifests
when: inventory_hostname == groups['kube-master'][0]
- name: Kubernetes Apps | Start dashboard
kube:
name: "{{item.item.name}}"
namespace: "{{system_namespace}}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]

View file

@@ -1,7 +1,7 @@
---
- name: Kubernetes Apps | Wait for kube-apiserver
uri:
url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz
url: "{{ kube_apiserver_insecure_endpoint }}/healthz"
register: result
until: result.status == 200
retries: 10
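The same readiness probe can be reproduced by hand on a master node; a sketch, assuming the insecure endpoint resolves to the default localhost:8080:

```
curl -fsS http://localhost:8080/healthz   # prints "ok" once the apiserver is up
```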
@@ -14,12 +14,12 @@
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: kubedns, file: kubedns-sa.yml, type: sa}
- {name: kubedns, file: kubedns-deploy.yml, type: deployment}
- {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment}
- {name: kubedns, file: kubedns-svc.yml, type: svc}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
- {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
- {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment}
register: manifests
when:
- dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
@@ -51,13 +51,20 @@
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
state: "latest"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube-master'][0]
- not item|skipped
tags: dnsmasq
- name: Kubernetes Apps | Netchecker
include: tasks/netchecker.yml
when: deploy_netchecker
tags: netchecker
- name: Kubernetes Apps | Dashboard
include: tasks/dashboard.yml
when: dashboard_enabled
tags: dashboard

View file

@@ -1,3 +1,21 @@
---
- name: Kubernetes Apps | Check if netchecker-server manifest already exists
stat:
path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2"
register: netchecker_server_manifest
tags: ['facts', 'upgrade']
- name: Kubernetes Apps | Apply netchecker-server manifest to update annotations
kube:
name: "netchecker-server"
namespace: "{{ netcheck_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "deploy"
state: latest
when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists
tags: upgrade
- name: Kubernetes Apps | Lay Down Netchecker Template
template:
src: "{{item.file}}"
@@ -24,18 +42,6 @@
state: absent
when: inventory_hostname == groups['kube-master'][0]
#FIXME: remove if kubernetes/features#124 is implemented
- name: Kubernetes Apps | Purge old Netchecker daemonsets
kube:
name: "{{item.item.name}}"
namespace: "{{netcheck_namespace}}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: absent
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and item.item.type == "ds" and item.changed
- name: Kubernetes Apps | Start Netchecker Resources
kube:
name: "{{item.item.name}}"
@@ -43,7 +49,6 @@
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
state: "latest"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube-master'][0] and not item|skipped

View file

@@ -0,0 +1,110 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy head version of the Dashboard UI compatible with
# Kubernetes 1.6 (RBAC enabled).
#
# Example usage: kubectl create -f <this_file>
{% if rbac_enabled %}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ system_namespace }}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
labels:
k8s-app: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: {{ system_namespace }}
{% endif %}
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ system_namespace }}
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }}
# Image is tagged and updated with :head, so always pull it.
imagePullPolicy: Always
resources:
limits:
cpu: {{ dashboard_cpu_limit }}
memory: {{ dashboard_memory_limit }}
requests:
cpu: {{ dashboard_cpu_requests }}
memory: {{ dashboard_memory_requests }}
ports:
- containerPort: 9090
protocol: TCP
args:
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto-discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
{% if rbac_enabled %}
serviceAccountName: kubernetes-dashboard
{% endif %}
# Comment out the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: {{ system_namespace }}
spec:
ports:
- port: 80
targetPort: 9090
selector:
k8s-app: kubernetes-dashboard
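
For context, a minimal verification sketch (not part of this commit): a task that polls until the Dashboard pod reports Running from the first master, reusing the bin_dir and system_namespace variables already used in the manifests above.

# Hypothetical smoke-test task, assuming the labels and variables shown above.
- name: Kubernetes Apps | Wait for kubernetes-dashboard pod
  command: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get pods -l k8s-app=kubernetes-dashboard -o jsonpath='{.items[0].status.phase}'"
  register: dashboard_phase
  until: dashboard_phase.stdout == "Running"
  retries: 10
  delay: 6
  when: inventory_hostname == groups['kube-master'][0]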

View file

@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");

View file

@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");

View file

@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");

View file

@ -1,3 +1,4 @@
---
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -26,26 +27,26 @@ spec:
metadata:
labels:
k8s-app: kubedns-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}"
resources:
requests:
cpu: "20m"
memory: "10Mi"
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace={{ system_namespace }}
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
- /cluster-proportional-autoscaler
- --namespace={{ system_namespace }}
- --configmap=kubedns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
- --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}}
- --logtostderr=true
- --v=2
{% if rbac_enabled %}
serviceAccountName: cluster-proportional-autoscaler
{% endif %}
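
As a rough guide to what --default-params does here: in the autoscaler's linear mode, when only nodesPerReplica and min are set, the replica count works out to max(ceil(nodes / nodesPerReplica), min). A worked example with assumed (hypothetical) values:

# Hypothetical defaults, for illustration only:
kubedns_nodes_per_replica: 16
kubedns_min_replicas: 2
# 10-node cluster -> max(ceil(10/16), 2) = 2 kube-dns replicas
# 40-node cluster -> max(ceil(40/16), 2) = 3 kube-dns replicas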

View file

@ -1,3 +1,4 @@
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
@ -29,6 +30,8 @@ spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoSchedule
operator: Exists
volumes:
- name: kube-dns-config
configMap:

View file

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:

View file

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: Service
metadata:
@ -19,4 +20,3 @@ spec:
- name: dns-tcp
port: 53
protocol: TCP

View file

@ -12,6 +12,9 @@ spec:
labels:
app: netchecker-agent
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: netchecker-agent
image: "{{ agent_img }}"
@ -37,3 +40,8 @@ spec:
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate

View file

@ -16,6 +16,9 @@ spec:
{% if kube_version | version_compare('v1.6', '>=') %}
dnsPolicy: ClusterFirstWithHostNet
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: netchecker-agent
image: "{{ agent_img }}"
@ -41,3 +44,7 @@ spec:
requests:
cpu: {{ netchecker_agent_cpu_requests }}
memory: {{ netchecker_agent_memory_requests }}
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate

View file

@ -25,12 +25,14 @@ spec:
memory: {{ netchecker_server_memory_requests }}
ports:
- containerPort: 8081
hostPort: 8081
args:
- "-v=5"
- "-logtostderr"
- "-kubeproxyinit"
- "-endpoint=0.0.0.0:8081"
tolerations:
- effect: NoSchedule
operator: Exists
{% if rbac_enabled %}
serviceAccountName: netchecker-server
{% endif %}

View file

@ -1,5 +1,5 @@
---
elasticsearch_cpu_limit: 1000m
elasticsearch_cpu_limit: 1000m
elasticsearch_mem_limit: 0M
elasticsearch_cpu_requests: 100m
elasticsearch_mem_requests: 0M

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: download
file: "{{ downloads.elasticsearch }}"

View file

@ -10,7 +10,7 @@
when: rbac_enabled
- name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
command: "kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
with_items:
- "efk-sa.yml"
- "efk-clusterrolebinding.yml"
@ -38,4 +38,3 @@
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
run_once: true
when: es_service_manifest.changed

View file

@ -1,3 +1,4 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:

View file

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:

View file

@ -1,5 +1,5 @@
---
fluentd_cpu_limit: 0m
fluentd_cpu_limit: 0m
fluentd_mem_limit: 200Mi
fluentd_cpu_requests: 100m
fluentd_mem_requests: 200Mi

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: download
file: "{{ downloads.fluentd }}"

View file

@ -20,4 +20,3 @@
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
run_once: true
when: fluentd_ds_manifest.changed

View file

@ -17,6 +17,9 @@ spec:
kubernetes.io/cluster-service: "true"
version: "v{{ fluentd_version }}"
spec:
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: fluentd-es
image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}"
@ -55,4 +58,3 @@ spec:
{% if rbac_enabled %}
serviceAccountName: efk
{% endif %}

View file

@ -1,5 +1,5 @@
---
kibana_cpu_limit: 100m
kibana_cpu_limit: 100m
kibana_mem_limit: 0M
kibana_cpu_requests: 100m
kibana_mem_requests: 0M

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: download
file: "{{ downloads.kibana }}"

View file

@ -1,6 +1,6 @@
---
- name: "Kibana | Write Kibana deployment"
template:
template:
src: kibana-deployment.yml.j2
dest: "{{ kube_config_dir }}/kibana-deployment.yaml"
register: kibana_deployment_manifest
@ -12,12 +12,12 @@
name: "kibana-logging"
namespace: "{{system_namespace}}"
resource: "deployment"
state: "{{ item | ternary('latest','present') }}"
state: "latest"
with_items: "{{ kibana_deployment_manifest.changed }}"
run_once: true
- name: "Kibana | Write Kibana service "
template:
template:
src: kibana-service.yml.j2
dest: "{{ kube_config_dir }}/kibana-service.yaml"
register: kibana_service_manifest
@ -29,6 +29,6 @@
name: "kibana-logging"
namespace: "{{system_namespace}}"
resource: "svc"
state: "{{ item | ternary('latest','present') }}"
state: "latest"
with_items: "{{ kibana_service_manifest.changed }}"
run_once: true

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: kubernetes-apps/efk/elasticsearch
- role: kubernetes-apps/efk/fluentd

View file

@ -1,3 +1,4 @@
---
helm_enabled: false
# specify a dir and attach it to helm for HELM_HOME.

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: download
file: "{{ downloads.helm }}"

View file

@ -27,9 +27,8 @@
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "{{item.changed | ternary('latest','present') }}"
state: "latest"
with_items: "{{ manifests.results }}"
failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg
when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled
- name: Helm | Install/upgrade helm

View file

@ -1,3 +1,4 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:

View file

@ -1,3 +1,4 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:

View file

@ -1,3 +1,4 @@
---
dependencies:
- role: download
file: "{{ downloads.netcheck_server }}"

View file

@ -0,0 +1,11 @@
---
- name: Start Calico resources
kube:
name: "{{item.item.name}}"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ calico_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item|skipped

View file

@ -1,32 +1,11 @@
- name: Create canal ConfigMap
run_once: true
---
- name: Canal | Start Resources
kube:
name: "canal-config"
name: "{{item.item.name}}"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
filename: "{{kube_config_dir}}/canal-config.yaml"
resource: "configmap"
namespace: "{{system_namespace}}"
#FIXME: remove if kubernetes/features#124 is implemented
- name: Purge old flannel and canal-node
run_once: true
kube:
name: "canal-node"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/canal-node.yaml"
resource: "ds"
namespace: "{{system_namespace}}"
state: absent
when: inventory_hostname == groups['kube-master'][0] and canal_node_manifest.changed
- name: Start flannel and calico-node
run_once: true
kube:
name: "canal-node"
kubectl: "{{bin_dir}}/kubectl"
filename: "{{kube_config_dir}}/canal-node.yaml"
resource: "ds"
namespace: "{{system_namespace}}"
state: "{{ item | ternary('latest','present') }}"
with_items: "{{ canal_node_manifest.changed }}"
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ canal_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item|skipped

View file

@ -0,0 +1,22 @@
---
- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding"
command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml"
run_once: true
when: rbac_enabled and flannel_rbac_manifest.changed
- name: Flannel | Start Resources
kube:
name: "kube-flannel"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/cni-flannel.yml"
resource: "ds"
namespace: "{{system_namespace}}"
state: "latest"
with_items: "{{ flannel_manifest.changed }}"
when: inventory_hostname == groups['kube-master'][0]
- name: Flannel | Wait for flannel subnet.env file presence
wait_for:
path: /run/flannel/subnet.env
delay: 5
timeout: 600

View file

@ -1,8 +1,14 @@
---
dependencies:
- role: kubernetes-apps/network_plugin/canal
when: kube_network_plugin == 'canal'
tags: canal
- role: kubernetes-apps/network_plugin/weave
when: kube_network_plugin == 'weave'
tags: weave
- role: kubernetes-apps/network_plugin/calico
when: kube_network_plugin == 'calico'
tags: calico
- role: kubernetes-apps/network_plugin/canal
when: kube_network_plugin == 'canal'
tags: canal
- role: kubernetes-apps/network_plugin/flannel
when: kube_network_plugin == 'flannel'
tags: flannel
- role: kubernetes-apps/network_plugin/weave
when: kube_network_plugin == 'weave'
tags: weave

View file

@ -1,4 +1,5 @@
#FIXME: remove if kubernetes/features#124 is implemented
---
# FIXME: remove if kubernetes/features#124 is implemented
- name: Weave | Purge old weave daemonset
kube:
name: "weave-net"
@ -9,7 +10,6 @@
state: absent
when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed
- name: Weave | Start Resources
kube:
name: "weave-net"
@ -17,11 +17,9 @@
filename: "{{ kube_config_dir }}/weave-net.yml"
resource: "ds"
namespace: "{{system_namespace}}"
state: "{{ item | ternary('latest','present') }}"
with_items: "{{ weave_manifest.changed }}"
state: "latest"
when: inventory_hostname == groups['kube-master'][0]
- name: "Weave | wait for weave to become available"
uri:
url: http://127.0.0.1:6784/status

View file

@ -1,3 +1,4 @@
---
# Limits for calico apps
calico_policy_controller_cpu_limit: 100m
calico_policy_controller_memory_limit: 256M
@ -7,3 +8,8 @@ calico_policy_controller_memory_requests: 64M
# SSL
calico_cert_dir: "/etc/calico/certs"
canal_cert_dir: "/etc/canal/certs"
rbac_resources:
- sa
- clusterrole
- clusterrolebinding

View file

@ -1,21 +1,49 @@
- set_fact:
---
- name: Set cert dir
set_fact:
calico_cert_dir: "{{ canal_cert_dir }}"
when: kube_network_plugin == 'canal'
tags: [facts, canal]
- name: Write calico-policy-controller yaml
- name: Get calico-policy-controller version if running
shell: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d':' -f2"
register: existing_calico_policy_version
run_once: true
failed_when: false
# FIXME(mattymo): This should not be necessary
- name: Delete calico-policy-controller if an old one is installed
kube:
name: calico-policy-controller
kubectl: "{{bin_dir}}/kubectl"
resource: rs
namespace: "{{ system_namespace }}"
state: absent
run_once: true
when:
- not "NotFound" in existing_calico_policy_version.stderr
- existing_calico_policy_version.stdout | version_compare('v0.7.0', '<')
- name: Create calico-policy-controller manifests
template:
src: calico-policy-controller.yml.j2
dest: "{{kube_config_dir}}/calico-policy-controller.yml"
when: inventory_hostname == groups['kube-master'][0]
tags: canal
src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: calico-policy-controller, file: calico-policy-controller.yml, type: rs}
- {name: calico-policy-controller, file: calico-policy-sa.yml, type: sa}
- {name: calico-policy-controller, file: calico-policy-cr.yml, type: clusterrole}
- {name: calico-policy-controller, file: calico-policy-crb.yml, type: clusterrolebinding}
register: calico_policy_manifests
when:
- rbac_enabled or item.type not in rbac_resources
- name: Start Calico policy controller
kube:
name: "calico-policy-controller"
name: "{{item.item.name}}"
namespace: "{{ system_namespace }}"
kubectl: "{{bin_dir}}/kubectl"
filename: "{{kube_config_dir}}/calico-policy-controller.yml"
namespace: "{{system_namespace}}"
resource: "rs"
when: inventory_hostname == groups['kube-master'][0]
tags: canal
resource: "{{item.item.type}}"
filename: "{{kube_config_dir}}/{{item.item.file}}"
state: "latest"
with_items: "{{ calico_policy_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item|skipped
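
The two tasks above follow the pattern this commit standardizes on across the network-plugin and apps roles: template a list of manifests, skipping RBAC resources when RBAC is off, then apply only the items that were not skipped. A minimal sketch with illustrative names (example-app, example_manifests are placeholders; the modules, variables, and filters are the ones used throughout this commit):

- name: Example | Lay down manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items:
    - {name: example-app, file: example-deploy.yml, type: deploy}
    - {name: example-app, file: example-sa.yml, type: sa}
  register: example_manifests
  when:
    - rbac_enabled or item.type not in rbac_resources

- name: Example | Apply manifests
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ system_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ example_manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0] and not item|skipped

Items filtered out by the template task's when clause still appear in results as skipped entries, which is why the apply task needs the not item|skipped guard.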

View file

@ -15,12 +15,18 @@ spec:
template:
metadata:
name: calico-policy-controller
namespace: {{system_namespace}}
namespace: {{ system_namespace }}
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
spec:
hostNetwork: true
{% if rbac_enabled %}
serviceAccountName: calico-policy-controller
{% endif %}
tolerations:
- effect: NoSchedule
operator: Exists
containers:
- name: calico-policy-controller
image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }}

View file

@ -0,0 +1,17 @@
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: {{ system_namespace }}
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list

View file

@ -0,0 +1,13 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: {{ system_namespace }}

Some files were not shown because too many files have changed in this diff.