diff --git a/.gitignore b/.gitignore index 8d5d5088b..4df491aa1 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ __pycache__/ .Python env/ build/ +credentials/ develop-eggs/ dist/ downloads/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 948ef2983..7080c7c67 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,10 +18,7 @@ variables: # us-west1-a before_script: - - pip install ansible==2.3.0 - - pip install netaddr - - pip install apache-libcloud==0.20.1 - - pip install boto==2.9.0 + - pip install -r tests/requirements.txt - mkdir -p /.ssh - cp tests/ansible.cfg . @@ -75,10 +72,7 @@ before_script: - $HOME/.cache before_script: - docker info - - pip install ansible==2.3.0 - - pip install netaddr - - pip install apache-libcloud==0.20.1 - - pip install boto==2.9.0 + - pip install -r tests/requirements.txt - mkdir -p /.ssh - mkdir -p $HOME/.ssh - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa @@ -265,6 +259,7 @@ before_script: # Test matrix. Leave the comments for markup scripts. .coreos_calico_sep_variables: &coreos_calico_sep_variables # stage: deploy-gce-part1 + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" KUBE_NETWORK_PLUGIN: calico CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817 CLOUD_REGION: us-west1-b @@ -275,9 +270,10 @@ before_script: ##User-data to simply turn off coreos upgrades STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd' -.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables +.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables # stage: deploy-gce-part1 KUBE_NETWORK_PLUGIN: canal + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" CLOUD_IMAGE: ubuntu-1604-xenial CLOUD_REGION: europe-west1-b CLUSTER_MODE: ha @@ -370,6 +366,8 @@ before_script: .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables # stage: deploy-gce-part1 + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" + CLOUD_MACHINE_TYPE: "n1-standard-2" KUBE_NETWORK_PLUGIN: canal CERT_MGMT: vault CLOUD_IMAGE: ubuntu-1604-xenial @@ -451,24 +449,24 @@ ubuntu-weave-sep-triggers: only: ['triggers'] # More builds for PRs/merges (manual) and triggers (auto) -ubuntu-canal-ha: +ubuntu-canal-ha-rbac: stage: deploy-gce-part1 <<: *job <<: *gce variables: <<: *gce_variables - <<: *ubuntu_canal_ha_variables + <<: *ubuntu_canal_ha_rbac_variables when: manual except: ['triggers'] only: ['master', /^pr-.*$/] -ubuntu-canal-ha-triggers: +ubuntu-canal-ha-rbac-triggers: stage: deploy-gce-part1 <<: *job <<: *gce variables: <<: *gce_variables - <<: *ubuntu_canal_ha_variables + <<: *ubuntu_canal_ha_rbac_variables when: on_success only: ['triggers'] @@ -642,6 +640,13 @@ syntax-check: - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check except: ['triggers', 'master'] +yamllint: + <<: *job + stage: unit-tests + script: + - yamllint roles + except: ['triggers', 'master'] + tox-inventory-builder: stage: unit-tests <<: *job diff --git a/.yamllint b/.yamllint new file mode 100644 index 000000000..50e7b167e --- /dev/null +++ b/.yamllint @@ -0,0 +1,16 @@ +--- +extends: default + +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 + indentation: + spaces: 2 + indent-sequences: consistent + line-length: disable + new-line-at-end-of-file: disable + truthy: disable diff --git a/README.md b/README.md index bb49cd041..641f783d7 100644 --- a/README.md +++ b/README.md @@ -53,13 +53,13 @@ 
Versions of supported components -------------------------------- -[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7
-[etcd](https://github.com/coreos/etcd/releases) v3.0.17
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3
+[etcd](https://github.com/coreos/etcd/releases) v3.2.4
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0
+[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
[weave](http://weave.works/) v2.0.1
-[docker](https://www.docker.com/) v1.13.1 (see note)
+[docker](https://www.docker.com/) v1.13 (see note)
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin). diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index 451fc58a7..d0d63f7e3 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -25,16 +25,29 @@ export AWS_DEFAULT_REGION="zzz" - Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars` - Update `contrib/terraform/aws/terraform.tfvars` with your data - - Allocate new AWS Elastic IPs: Depending on # of Availability Zones used (2 for each AZ) - - Create an AWS EC2 SSH Key - - +- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below) +- Create an AWS EC2 SSH Key - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials +Example: +```commandline +terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77' +``` + - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` +- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following: +```commandline +ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m +``` + - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. +Example (this one assumes you are using CoreOS) +```commandline +ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache +``` + **Troubleshooting** ***Remaining AWS IAM Instance Profile***: diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf index a58bca53c..04c5a8881 100644 --- a/contrib/terraform/aws/create-infrastructure.tf +++ b/contrib/terraform/aws/create-infrastructure.tf @@ -162,7 +162,7 @@ resource "aws_instance" "k8s-worker" { */ data "template_file" "inventory" { template = "${file("${path.module}/templates/inventory.tpl")}" - + vars { public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}" connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" @@ -173,9 +173,9 @@ data "template_file" "inventory" { list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}" elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}" - kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}" - + loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}" } + } resource "null_resource" "inventories" { @@ -183,4 +183,8 @@ resource "null_resource" "inventories" { command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts" } + triggers { + template = "${data.template_file.inventory.rendered}" + } + } diff --git a/contrib/terraform/aws/templates/inventory.tpl 
b/contrib/terraform/aws/templates/inventory.tpl index 8d5afd1cf..dd8126002 100644 --- a/contrib/terraform/aws/templates/inventory.tpl +++ b/contrib/terraform/aws/templates/inventory.tpl @@ -25,4 +25,4 @@ kube-master [k8s-cluster:vars] ${elb_api_fqdn} ${elb_api_port} -${kube_insecure_apiserver_address} +${loadbalancer_apiserver_address} diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars index a538d46f3..bc83a719d 100644 --- a/contrib/terraform/aws/terraform.tfvars +++ b/contrib/terraform/aws/terraform.tfvars @@ -5,11 +5,11 @@ aws_cluster_name = "devtest" aws_vpc_cidr_block = "10.250.192.0/18" aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"] aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"] -aws_avail_zones = ["eu-central-1a","eu-central-1b"] +aws_avail_zones = ["us-west-2a","us-west-2b"] #Bastion Host -aws_bastion_ami = "ami-5900cc36" -aws_bastion_size = "t2.small" +aws_bastion_ami = "ami-db56b9a3" +aws_bastion_size = "t2.medium" #Kubernetes Cluster @@ -23,9 +23,10 @@ aws_etcd_size = "t2.medium" aws_kube_worker_num = 4 aws_kube_worker_size = "t2.medium" -aws_cluster_ami = "ami-903df7ff" +aws_cluster_ami = "ami-db56b9a3" #Settings AWS ELB -aws_elb_api_port = 443 -k8s_secure_api_port = 443 +aws_elb_api_port = 6443 +k8s_secure_api_port = 6443 +kube_insecure_apiserver_address = "0.0.0.0" diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf index c740e6472..c7c65c772 100644 --- a/contrib/terraform/aws/variables.tf +++ b/contrib/terraform/aws/variables.tf @@ -96,6 +96,6 @@ variable "k8s_secure_api_port" { description = "Secure Port of K8S API Server" } -variable "kube_insecure_apiserver_address" { - description= "Bind Address for insecure Port of K8s API Server" +variable "loadbalancer_apiserver_address" { + description= "Bind Address for ELB of K8s API Server" } diff --git a/docs/flannel.md b/docs/flannel.md index 307eab56c..06351538d 100644 --- a/docs/flannel.md +++ b/docs/flannel.md @@ -23,13 +23,6 @@ ip a show dev flannel.1 valid_lft forever preferred_lft forever ``` -* Docker must be configured with a bridge ip in the flannel subnet. - -``` -ps aux | grep docker -root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450 -``` - * Try to run a container and check its ip address ``` diff --git a/docs/getting-started.md b/docs/getting-started.md index 25bcbfaad..65b590a2f 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -57,7 +57,7 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \ See more details in the [ansible guide](ansible.md). Adding nodes --------------------------- +------------ You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters. @@ -66,4 +66,38 @@ You may want to add worker nodes to your existing cluster. This can be done by r ``` ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \ --private-key=~/.ssh/private_key -``` \ No newline at end of file +``` + +Connecting to Kubernetes +------------------------ +By default, Kubespray configures kube-master hosts with insecure access to +kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, +because kubectl will use http://localhost:8080 to connect. 
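As a quick sanity check of that insecure local access (a hypothetical sketch, not part of this changeset; assumes the default insecure port 8080 and a working kubectl on the kube-master host):

```
# Run on a kube-master host; assumes the default insecure port 8080
curl http://localhost:8080/version
kubectl get nodes
```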
The kubeconfig files +generated will point to localhost (on kube-masters) and kube-node hosts will +connect either to a localhost nginx proxy or to a loadbalancer if configured. +More details on this process are in the [HA guide](ha.md). + +Kubespray permits connecting to the cluster remotely on any IP of any +kube-master host on port 6443 by default. However, this requires +authentication. One could generate a kubeconfig based on one of the installed +kube-master hosts (needs improvement) or connect with a username and password. +By default, a user with admin rights is created, named `kube`. +The password can be viewed after deployment by looking at the file +`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated +password. If you wish to set your own password, just precreate/modify this +file yourself. + +For more information on kubeconfig and accessing a Kubernetes cluster, refer to +the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). + +Accessing Kubernetes Dashboard +------------------------------ + +If the variable `dashboard_enabled` is set (default is true), then you can +access the Kubernetes Dashboard at the following URL: + + https://kube:_kube-password_@_host_:6443/ui/ + +To see the password, refer to the section above, titled *Connecting to +Kubernetes*. The host can be any kube-master or kube-node, or the loadbalancer +(when enabled). diff --git a/docs/vars.md b/docs/vars.md index f50197832..b2b66d3c3 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,6 +67,8 @@ following default cluster parameters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes +* *kube_feature_gates* - A list of key=value pairs that describe feature gates for + alpha/experimental Kubernetes features. (default is `[]`) * *authorization_modes* - A list of [authorization modes]( https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) that the cluster should be configured for. Defaults to `[]` (i.e. no authorization). diff --git a/docs/vault.md b/docs/vault.md index 3850d04b5..056d76356 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -26,7 +26,6 @@ first task is to stop any temporary instances of Vault, to free the port for the long-term. At the end of this task, the entire Vault cluster should be up and ready to go. - Keys to the Kingdom ------------------- @@ -44,30 +43,38 @@ to authenticate to almost everything in Kubernetes and decode all private (HTTPS) traffic on your network signed by Vault certificates. For even greater security, you may want to remove and store elsewhere any -CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem). +CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem). Vault by default encrypts all traffic to and from the datastore backend, all resting data, and uses TLS for its TCP listener. It is recommended that you do not change the Vault config to disable TLS, unless you absolutely have to. - Usage ----- To get the Vault role running, you must do two things at a minimum: 1. Assign the ``vault`` group to at least 1 node in your inventory -2. Change ``cert_management`` to be ``vault`` instead of ``script`` +1. Change ``cert_management`` to be ``vault`` instead of ``script`` Nothing else is required, but customization is possible.
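As a minimal sketch of those two steps (the inventory path and `node1` hostname are placeholder assumptions, not part of this changeset):

```
# 1. Add at least one node to the vault group (hypothetical inventory path/host)
cat >> inventory/inventory.cfg <<EOF

[vault]
node1
EOF

# 2. Switch certificate management from 'script' to 'vault' for the run
ansible-playbook -i inventory/inventory.cfg cluster.yml -e cert_management=vault
```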
Check ``roles/vault/defaults/main.yml`` for the different variables that can be overridden, the most common being ``vault_config``, ``vault_port``, and ``vault_deployment_type``. -Also, if you intend to use a Root or Intermediate CA generated elsewhere, -you'll need to copy the certificate and key to the hosts in the vault group -prior to running the vault role. By default, they'll be located at -``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively. +As a result of running the Vault role, separate Root CAs will be created for `etcd`, +`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA +generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at: + +* vault: + * ``/etc/vault/ssl/ca.pem`` + * ``/etc/vault/ssl/ca-key.pem`` +* etcd: + * ``/etc/ssl/etcd/ssl/ca.pem`` + * ``/etc/ssl/etcd/ssl/ca-key.pem`` +* kubernetes: + * ``/etc/kubernetes/ssl/ca.pem`` + * ``/etc/kubernetes/ssl/ca-key.pem`` Additional Notes: @@ -77,7 +84,6 @@ Additional Notes: credentials are saved to ``/etc/vault/roles//``. The service will need to read in those credentials, if it wants to interact with Vault. - Potential Work -------------- @@ -87,6 +93,3 @@ Potential Work - Add the ability to start temp Vault with Host, Rkt, or Docker - Add a dynamic way to change out the backend role creation during Bootstrap, so other services can be used (such as Consul) -- Segregate Server Cert generation from Auth Cert generation (separate CAs). - This work was partially started with the `auth_cert_backend` tasks, but would - need to be further applied to all roles (particularly Etcd and Kubernetes). diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index b70cd6766..fbb1a34e5 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -40,23 +40,18 @@ kube_log_level: 2 # Users to create for basic auth in Kubernetes API via HTTP # Optionally add groups for user -kube_api_pwd: "changeme" +kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}" kube_users: kube: pass: "{{kube_api_pwd}}" role: admin - root: - pass: "{{kube_api_pwd}}" - role: admin - # groups: - # - system:masters - - + groups: + - system:masters ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) #kube_oidc_auth: false -#kube_basic_auth: false -#kube_token_auth: false +#kube_basic_auth: true +#kube_token_auth: true ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ @@ -148,6 +143,9 @@ vault_deployment_type: docker # K8s image pull policy (imagePullPolicy) k8s_image_pull_policy: IfNotPresent +# Kubernetes dashboard (available at https://first_master:6443/ui by default) +dashboard_enabled: true + # Monitoring apps for k8s efk_enabled: false diff --git a/library/kube.py b/library/kube.py index fdc783fff..52f6a235d 100644 --- a/library/kube.py +++ b/library/kube.py @@ -139,7 +139,7 @@ class KubeManager(object): if check and self.exists(): return [] - cmd = ['create'] + cmd = ['apply'] if not self.filename: self.module.fail_json(msg='filename required to create') @@ -150,10 +150,7 @@ def replace(self): - if not self.force and not self.exists(): - return [] - - cmd = ['replace'] + cmd = ['apply'] if self.force: cmd.append('--force') @@ -270,9 +267,8 @@ def main(): manager = KubeManager(module) state = 
module.params.get('state') - if state == 'present': - result = manager.create() + result = manager.create(check=False) elif state == 'absent': result = manager.delete() @@ -284,11 +280,7 @@ def main(): result = manager.stop() elif state == 'latest': - if manager.exists(): - manager.force = True - result = manager.replace() - else: - result = manager.create(check=False) + result = manager.replace() else: module.fail_json(msg='Unrecognized state %s.' % state) diff --git a/roles/bastion-ssh-config/templates/ssh-bastion.conf b/roles/bastion-ssh-config/templates/ssh-bastion.conf index ebb380665..a6a5bc592 100644 --- a/roles/bastion-ssh-config/templates/ssh-bastion.conf +++ b/roles/bastion-ssh-config/templates/ssh-bastion.conf @@ -16,6 +16,6 @@ Host {{ bastion_ip }} ControlPersist 5m Host {{ vars['hosts'] }} - ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} + ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} StrictHostKeyChecking no {% endif %} diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml index 892da1c04..2a2271055 100644 --- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -49,4 +49,3 @@ pip: name: "{{ item }}" with_items: "{{pip_python_modules}}" - diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 73268031e..5e1cdbc03 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -21,10 +21,20 @@ - name: Gather nodes hostnames setup: gather_subset: '!all' - filter: ansible_hostname + filter: ansible_* -- name: Assign inventory name to unconfigured hostnames +- name: Assign inventory name to unconfigured hostnames (non-CoreOS) hostname: name: "{{inventory_hostname}}" - when: ansible_hostname == 'localhost' + when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] +- name: Assign inventory name to unconfigured hostnames (CoreOS only) + command: "hostnamectl set-hostname {{inventory_hostname}}" + register: hostname_changed + when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + +- name: Update hostname fact (CoreOS only) + setup: + gather_subset: '!all' + filter: ansible_hostname + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed diff --git a/roles/bootstrap-os/tasks/setup-pipelining.yml b/roles/bootstrap-os/tasks/setup-pipelining.yml index 7143f260e..559cef25e 100644 --- a/roles/bootstrap-os/tasks/setup-pipelining.yml +++ b/roles/bootstrap-os/tasks/setup-pipelining.yml @@ -6,4 +6,3 @@ regexp: '^\w+\s+requiretty' dest: /etc/sudoers state: absent - diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml index bf670c788..15fb7f169 100644 --- a/roles/dnsmasq/defaults/main.yml +++ b/roles/dnsmasq/defaults/main.yml @@ -4,12 +4,12 @@ # Max of 4 names is allowed and no more than 256 - 17 chars total # (a 2 is reserved for the 'default.svc.' 
and 'svc.') -#searchdomains: -# - foo.bar.lc +# searchdomains: +# - foo.bar.lc # Max of 2 is allowed here (a 1 is reserved for the dns_server) -#nameservers: -# - 127.0.0.1 +# nameservers: +# - 127.0.0.1 dns_forward_max: 150 cache_size: 1000 diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index edc50703d..4a9031013 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -1,6 +1,4 @@ --- -- include: pre_upgrade.yml - - name: ensure dnsmasq.d directory exists file: path: /etc/dnsmasq.d @@ -56,6 +54,26 @@ dest: /etc/dnsmasq.d/01-kube-dns.conf state: link +- name: Create dnsmasq RBAC manifests + template: + src: "{{ item }}" + dest: "{{ kube_config_dir }}/{{ item }}" + with_items: + - "dnsmasq-clusterrolebinding.yml" + - "dnsmasq-serviceaccount.yml" + when: rbac_enabled + delegate_to: "{{ groups['kube-master'][0] }}" + run_once: true + +- name: Apply dnsmasq RBAC manifests + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }}" + with_items: + - "dnsmasq-clusterrolebinding.yml" + - "dnsmasq-serviceaccount.yml" + when: rbac_enabled + delegate_to: "{{ groups['kube-master'][0] }}" + run_once: true + - name: Create dnsmasq manifests template: src: "{{item.file}}" @@ -63,7 +81,7 @@ with_items: - {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment} - {name: dnsmasq, file: dnsmasq-svc.yml, type: svc} - - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml, type: deployment} + - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml.j2, type: deployment} register: manifests delegate_to: "{{ groups['kube-master'][0] }}" run_once: true @@ -75,7 +93,7 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" delegate_to: "{{ groups['kube-master'][0] }}" run_once: true @@ -86,4 +104,3 @@ port: 53 timeout: 180 when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts - diff --git a/roles/dnsmasq/tasks/pre_upgrade.yml b/roles/dnsmasq/tasks/pre_upgrade.yml deleted file mode 100644 index 9d1517580..000000000 --- a/roles/dnsmasq/tasks/pre_upgrade.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Delete legacy dnsmasq daemonset - kube: - name: dnsmasq - namespace: "{{system_namespace}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "ds" - state: absent - when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 similarity index 55% rename from roles/dnsmasq/templates/dnsmasq-autoscaler.yml rename to roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 index 4e5e2ddcc..d9e7b10f3 100644 --- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml +++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors.
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -30,21 +31,26 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: +{% if rbac_enabled %} + serviceAccountName: dnsmasq +{% endif %} + tolerations: + - effect: NoSchedule + operator: Exists containers: - - name: autoscaler - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 - resources: + - name: autoscaler + image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 + resources: requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=dnsmasq-autoscaler - - --target=Deployment/dnsmasq - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. - - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} - - --logtostderr=true - - --v={{ kube_log_level }} - + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=dnsmasq-autoscaler + - --target=Deployment/dnsmasq + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} + - --logtostderr=true + - --v={{ kube_log_level }} diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml new file mode 100644 index 000000000..817de877b --- /dev/null +++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml @@ -0,0 +1,14 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: dnsmasq + namespace: "{{ system_namespace }}" +subjects: + - kind: ServiceAccount + name: dnsmasq + namespace: "{{ system_namespace}}" +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index e811e1995..838471050 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -21,6 +21,9 @@ spec: kubernetes.io/cluster-service: "true" kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}" spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: dnsmasq image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}" @@ -35,7 +38,6 @@ spec: capabilities: add: - NET_ADMIN - imagePullPolicy: IfNotPresent resources: limits: cpu: {{ dns_cpu_limit }} @@ -55,7 +57,6 @@ spec: mountPath: /etc/dnsmasq.d - name: etcdnsmasqdavailable mountPath: /etc/dnsmasq.d-available - volumes: - name: etcdnsmasqd hostPath: @@ -64,4 +65,3 @@ spec: hostPath: path: /etc/dnsmasq.d-available dnsPolicy: Default # Don't use cluster DNS. 
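Once the RBAC tasks above have run, a quick way to verify the objects landed (a hypothetical spot-check, assuming `system_namespace` keeps its usual `kube-system` default):

```
kubectl -n kube-system get serviceaccount dnsmasq
kubectl get clusterrolebinding dnsmasq
```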
- diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml new file mode 100644 index 000000000..bce8a232f --- /dev/null +++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dnsmasq + namespace: "{{ system_namespace }}" + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e262d908a..9da348cda 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,3 +1,4 @@ +--- docker_version: '1.13' docker_package_info: @@ -10,3 +11,6 @@ docker_repo_info: repos: docker_dns_servers_strict: yes + +docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7' +docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg' diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 90d7aacb8..a43d843ee 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -8,7 +8,7 @@ - Docker | pause while Docker restarts - Docker | wait for docker -- name : Docker | reload systemd +- name: Docker | reload systemd shell: systemctl daemon-reload - name: Docker | reload docker.socket diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 09240bf9d..ef7e7fe8d 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -3,14 +3,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml index 64a09bff2..13f342ea9 100644 --- a/roles/docker/tasks/set_facts_dns.yml +++ b/roles/docker/tasks/set_facts_dns.yml @@ -48,7 +48,7 @@ - name: add system search domains to docker options set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}" - when: system_search_domains.stdout != "" + when: system_search_domains.stdout != "" - name: check number of nameservers fail: diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index 1275de5d7..ec4bbf9ab 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -10,11 +10,18 @@ dest: /etc/systemd/system/docker.service.d/http-proxy.conf when: http_proxy is defined or https_proxy is defined or no_proxy is defined +- name: get systemd version + command: rpm -q --qf '%{V}\n' systemd + register: systemd_version + when: ansible_os_family == "RedHat" and not is_atomic + changed_when: false + - name: Write docker.service systemd file template: src: docker.service.j2 dest: /etc/systemd/system/docker.service register: 
docker_service_file + notify: restart docker when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) - name: Write docker.service systemd file for atomic diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2 index 3f54c853d..c70f3d89f 100644 --- a/roles/docker/templates/docker-options.conf.j2 +++ b/roles/docker/templates/docker-options.conf.j2 @@ -1,3 +1,3 @@ [Service] Environment="DOCKER_OPTS={{ docker_options | default('') }} \ ---iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}" +--iptables=false" diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index 54e4b7c06..29a80c107 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -24,7 +24,9 @@ ExecStart={{ docker_bin_dir }}/docker daemon \ $DOCKER_NETWORK_OPTIONS \ $DOCKER_DNS_OPTIONS \ $INSECURE_REGISTRY +{% if ansible_os_family == "RedHat" and systemd_version.stdout|int >= 226 %} TasksMax=infinity +{% endif %} LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity diff --git a/roles/docker/templates/rh_docker.repo.j2 b/roles/docker/templates/rh_docker.repo.j2 index e783c0ddf..7cb728625 100644 --- a/roles/docker/templates/rh_docker.repo.j2 +++ b/roles/docker/templates/rh_docker.repo.j2 @@ -1,7 +1,7 @@ [dockerrepo] name=Docker Repository -baseurl=https://yum.dockerproject.org/repo/main/centos/7 +baseurl={{ docker_rh_repo_base_url }} enabled=1 gpgcheck=1 -gpgkey=https://yum.dockerproject.org/gpg +gpgkey={{ docker_rh_repo_gpgkey }} {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml index a4689ffbc..240e86ea4 100644 --- a/roles/docker/vars/debian.yml +++ b/roles/docker/vars/debian.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '3.10' # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist diff --git a/roles/docker/vars/fedora-20.yml b/roles/docker/vars/fedora-20.yml index c74cd9f28..31d431ee8 100644 --- a/roles/docker/vars/fedora-20.yml +++ b/roles/docker/vars/fedora-20.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # versioning: docker-io itself is pinned at docker 1.5 diff --git a/roles/docker/vars/fedora.yml b/roles/docker/vars/fedora.yml index f89c90a52..b82e5fc30 100644 --- a/roles/docker/vars/fedora.yml +++ b/roles/docker/vars/fedora.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml index 7abf2cda7..8b20def55 100644 --- a/roles/docker/vars/redhat.yml +++ b/roles/docker/vars/redhat.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # https://yum.dockerproject.org/repo/main/centos/7/Packages/ @@ -8,7 +9,7 @@ docker_versioned_pkg: '1.12': docker-engine-1.12.6-1.el7.centos '1.13': docker-engine-1.13.1-1.el7.centos 'stable': docker-engine-17.03.0.ce-1.el7.centos - 'edge': docker-engine-17.03.0.ce-1.el7.centos + 'edge': docker-engine-17.03.0.ce-1.el7.centos # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://download.docker.com/linux/centos/7/x86_64/stable/Packages/ diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index e5a4aa31b..d5c5ef7e4 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -20,20 +20,22 @@ download_always_pull: False # Versions 
kube_version: v1.7.3 etcd_version: v3.2.4 -#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults +# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download -calico_version: "v1.1.3" -calico_cni_version: "v1.8.0" -calico_policy_version: "v0.5.4" +calico_version: "v2.5.0" +calico_ctl_version: "v1.5.0" +calico_cni_version: "v1.10.0" +calico_policy_version: "v0.7.0" weave_version: 2.0.1 -flannel_version: v0.8.0 +flannel_version: "v0.8.0" +flannel_cni_version: "v0.2.0" pod_infra_version: 3.0 # Download URL's etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd" # Checksums -etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485" +etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b" # Containers # Possible values: host, docker @@ -42,13 +44,15 @@ etcd_image_repo: "quay.io/coreos/etcd" etcd_image_tag: "{{ etcd_version }}" flannel_image_repo: "quay.io/coreos/flannel" flannel_image_tag: "{{ flannel_version }}" -calicoctl_image_repo: "calico/ctl" -calicoctl_image_tag: "{{ calico_version }}" -calico_node_image_repo: "calico/node" +flannel_cni_image_repo: "quay.io/coreos/flannel-cni" +flannel_cni_image_tag: "{{ flannel_cni_version }}" +calicoctl_image_repo: "quay.io/calico/ctl" +calicoctl_image_tag: "{{ calico_ctl_version }}" +calico_node_image_repo: "quay.io/calico/node" calico_node_image_tag: "{{ calico_version }}" -calico_cni_image_repo: "calico/cni" +calico_cni_image_repo: "quay.io/calico/cni" calico_cni_image_tag: "{{ calico_cni_version }}" -calico_policy_image_repo: "calico/kube-policy-controller" +calico_policy_image_repo: "quay.io/calico/kube-policy-controller" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "v0.3.0" @@ -56,6 +60,8 @@ hyperkube_image_repo: "quay.io/coreos/hyperkube" hyperkube_image_tag: "{{ kube_version }}_coreos.0" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" +install_socat_image_repo: "xueshanf/install-socat" +install_socat_image_tag: "latest" netcheck_version: "v1.0" netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent" netcheck_agent_tag: "{{ netcheck_version }}" @@ -137,6 +143,12 @@ downloads: tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + flannel_cni: + container: true + repo: "{{ flannel_cni_image_repo }}" + tag: "{{ flannel_cni_image_tag }}" + sha256: "{{ flannel_cni_digest_checksum|default(None) }}" + enabled: "{{ kube_network_plugin == 'flannel' }}" calicoctl: container: true repo: "{{ calicoctl_image_repo }}" @@ -184,6 +196,11 @@ downloads: repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" + install_socat: + container: true + repo: "{{ install_socat_image_repo }}" + tag: "{{ install_socat_image_tag }}" + sha256: "{{ install_socat_digest_checksum|default(None) }}" nginx: container: true repo: "{{ nginx_image_repo }}" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 24d1b5bca..9fa0d7ca8 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -1,12 +1,5 @@ --- -- name: downloading... 
- debug: msg: "{{ download.url }}" when: - download.enabled|bool - not download.container|bool - -- name: Create dest directories +- name: file_download | Create dest directories file: path: "{{local_release_dir}}/{{download.dest|dirname}}" state: directory @@ -16,7 +9,7 @@ - not download.container|bool tags: bootstrap-os -- name: Download items +- name: file_download | Download item get_url: url: "{{download.url}}" dest: "{{local_release_dir}}/{{download.dest}}" @@ -31,7 +24,7 @@ - download.enabled|bool - not download.container|bool -- name: Extract archives +- name: file_download | Extract archives unarchive: src: "{{ local_release_dir }}/{{download.dest}}" dest: "{{ local_release_dir }}/{{download.dest|dirname}}" @@ -41,10 +34,9 @@ when: - download.enabled|bool - not download.container|bool - - download.unarchive is defined - - download.unarchive == True + - download.unarchive|default(False) -- name: Fix permissions +- name: file_download | Fix permissions file: state: file path: "{{local_release_dir}}/{{download.dest}}" @@ -56,10 +48,11 @@ - (download.unarchive is not defined or download.unarchive == False) - set_fact: - download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" + download_delegate: "{% if download_localhost|bool %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" + run_once: true tags: facts -- name: Create dest directory for saved/loaded container images +- name: container_download | Create dest directory for saved/loaded container images file: path: "{{local_release_dir}}/containers" state: directory @@ -72,15 +65,14 @@ tags: bootstrap-os # This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes -- name: Hack python binary path for localhost +- name: container_download | Hack python binary path for localhost raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python" - when: download_delegate == 'localhost' delegate_to: localhost + when: download_delegate == 'localhost' failed_when: false - run_once: true tags: localhost -- name: Download | create local directory for saved/loaded container images +- name: container_download | create local directory for saved/loaded container images file: path: "{{local_release_dir}}/containers" state: directory @@ -95,24 +87,16 @@ - download_delegate == 'localhost' tags: localhost -- name: Make download decision if pull is required by tag or sha256 +- name: container_download | Make download decision if pull is required by tag or sha256 include: set_docker_image_facts.yml when: - download.enabled|bool - download.container|bool - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" + delegate_to: "{{ download_delegate if download_run_once|bool else omit }}" run_once: "{{ download_run_once|bool }}" tags: facts -- name: pulling... 
- debug: msg: "{{ pull_args }}" when: - download.enabled|bool - download.container|bool - -#NOTE(bogdando) this brings no docker-py deps for nodes -- name: Download containers if pull is required or told to always pull +- name: container_download | Download containers if pull is required or told to always pull command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}" register: pull_task_result until: pull_task_result|succeeded @@ -122,29 +106,29 @@ - download.enabled|bool - download.container|bool - pull_required|bool|default(download_always_pull) - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" + delegate_to: "{{ download_delegate if download_run_once|bool else omit }}" run_once: "{{ download_run_once|bool }}" - set_fact: fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar" + run_once: true tags: facts -- name: "Set default value for 'container_changed' to false" +- name: "container_download | Set default value for 'container_changed' to false" set_fact: container_changed: "{{pull_required|default(false)|bool}}" -- name: "Update the 'container_changed' fact" +- name: "container_download | Update the 'container_changed' fact" set_fact: container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}" when: - download.enabled|bool - download.container|bool - pull_required|bool|default(download_always_pull) - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" run_once: "{{ download_run_once|bool }}" tags: facts -- name: Stat saved container image +- name: container_download | Stat saved container image stat: path: "{{fname}}" register: img @@ -158,7 +142,7 @@ run_once: true tags: facts -- name: Download | save container images +- name: container_download | save container images shell: "{{ docker_bin_dir }}/docker save {{ pull_args }} | gzip -{{ download_compress }} > {{ fname }}" delegate_to: "{{ download_delegate }}" register: saved @@ -170,7 +154,7 @@ - download.container|bool - (container_changed|bool or not img.stat.exists) -- name: Download | copy container images to ansible host +- name: container_download | copy container images to ansible host synchronize: src: "{{ fname }}" dest: "{{ fname }}" @@ -186,7 +170,7 @@ - download.container|bool - saved.changed -- name: Download | upload container images to nodes +- name: container_download | upload container images to nodes synchronize: src: "{{ fname }}" dest: "{{ fname }}" @@ -206,7 +190,7 @@ - download.container|bool tags: [upload, upgrade] -- name: Download | load container images +- name: container_download | load container images shell: "{{ docker_bin_dir }}/docker load < {{ fname }}" when: - (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml index 4ae81d954..832c076b1 100644 --- a/roles/download/tasks/set_docker_image_facts.yml +++ b/roles/download/tasks/set_docker_image_facts.yml @@ -9,25 +9,20 @@ - name: Register docker images info raw: >- - {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}" + {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" 
| tr '\n' ',' no_log: true - register: docker_images_raw + register: docker_images failed_when: false changed_when: false check_mode: no when: not download_always_pull|bool -- set_fact: - docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}" - no_log: true - when: not download_always_pull|bool - - set_fact: pull_required: >- - {%- if pull_args in docker_images.split(',') %}false{%- else -%}true{%- endif -%} + {%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} when: not download_always_pull|bool - name: Check the local digest sha256 corresponds to the given image tag assert: - that: "{{download.repo}}:{{download.tag}} in docker_images.split(',')" + that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')" when: not download_always_pull|bool and not pull_required|bool and pull_by_digest|bool diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 7d1d976af..eb0cab951 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -21,8 +21,10 @@ etcd_metrics: "basic" etcd_memory_limit: 512M # Uncomment to set CPU share for etcd -#etcd_cpu_limit: 300m +# etcd_cpu_limit: 300m etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}" etcd_compaction_retention: "8" + +etcd_vault_mount_path: etcd diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 68fe71f07..9be90a5b1 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -5,6 +5,7 @@ - Refresh Time Fact - Set Backup Directory - Create Backup Directory + - Stat etcd v2 data directory - Backup etcd v2 data - Backup etcd v3 data when: etcd_cluster_is_healthy.rc == 0 @@ -24,7 +25,13 @@ group: root mode: 0600 +- name: Stat etcd v2 data directory + stat: + path: "{{ etcd_data_dir }}/member" + register: etcd_data_dir_member + - name: Backup etcd v2 data + when: etcd_data_dir_member.stat.exists command: >- {{ bin_dir }}/etcdctl backup --data-dir {{ etcd_data_dir }} @@ -43,4 +50,3 @@ ETCDCTL_API: 3 retries: 3 delay: "{{ retry_stagger | random + 3 }}" - diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index 45da999ee..2575c25a4 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -30,4 +30,3 @@ - name: set etcd_secret_changed set_fact: etcd_secret_changed: true - diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml index fe96ea01c..8795fe820 100644 --- a/roles/etcd/tasks/check_certs.yml +++ b/roles/etcd/tasks/check_certs.yml @@ -66,4 +66,3 @@ {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} - diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index f70c6ee21..46d0ddb9a 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -73,11 +73,10 @@ 'member-{{ node }}-key.pem', {% endfor %}]" my_master_certs: ['ca-key.pem', - 'admin-{{ inventory_hostname }}.pem', - 'admin-{{ inventory_hostname }}-key.pem', - 'member-{{ inventory_hostname }}.pem', - 'member-{{ inventory_hostname }}-key.pem' - ] + 'admin-{{ inventory_hostname }}.pem', + 'admin-{{ inventory_hostname }}-key.pem', + 'member-{{ inventory_hostname }}.pem', + 'member-{{ inventory_hostname }}-key.pem'] all_node_certs: "['ca.pem', {% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %} 'node-{{ node }}.pem', @@ -111,22 +110,22 @@ sync_certs|default(false) and 
inventory_hostname not in groups['etcd'] notify: set etcd_secret_changed -#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k -#char limit when using shell command - -#FIXME(mattymo): Use tempfile module in ansible 2.3 -- name: Gen_certs | Prepare tempfile for unpacking certs - shell: mktemp /tmp/certsXXXXX.tar.gz - register: cert_tempfile - when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and - inventory_hostname != groups['etcd'][0] +# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k +# char limit when using shell command -- name: Gen_certs | Write master certs to tempfile - copy: - content: "{{etcd_master_cert_data.stdout}}" - dest: "{{cert_tempfile.stdout}}" - owner: root - mode: "0600" +# FIXME(mattymo): Use tempfile module in ansible 2.3 +- name: Gen_certs | Prepare tempfile for unpacking certs + shell: mktemp /tmp/certsXXXXX.tar.gz + register: cert_tempfile + when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and + inventory_hostname != groups['etcd'][0] + +- name: Gen_certs | Write master certs to tempfile + copy: + content: "{{etcd_master_cert_data.stdout}}" + dest: "{{cert_tempfile.stdout}}" + owner: root + mode: "0600" when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and inventory_hostname != groups['etcd'][0] @@ -162,30 +161,3 @@ owner: kube mode: "u=rwX,g-rwx,o-rwx" recurse: yes - -- name: Gen_certs | target ca-certificate store file - set_fact: - ca_cert_path: |- - {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/etcd-ca.crt - {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/etcd-ca.crt - {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/etcd-ca.pem - {%- endif %} - tags: facts - -- name: Gen_certs | add CA to trusted CA dir - copy: - src: "{{ etcd_cert_dir }}/ca.pem" - dest: "{{ ca_cert_path }}" - remote_src: true - register: etcd_ca_cert - -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) - command: update-ca-certificates - when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - -- name: Gen_certs | update ca-certificates (RedHat) - command: update-ca-trust extract - when: etcd_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml index a0bf6cfdc..0048a7003 100644 --- a/roles/etcd/tasks/gen_certs_vault.yml +++ b/roles/etcd/tasks/gen_certs_vault.yml @@ -7,52 +7,14 @@ when: inventory_hostname in etcd_node_cert_hosts tags: etcd-secrets - -- name: gen_certs_vault | Read in the local credentials - command: cat /etc/vault/roles/etcd/userpass - register: etcd_vault_creds_cat - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Set facts for read Vault Creds - set_fact: - etcd_vault_creds: "{{ etcd_vault_creds_cat.stdout|from_json }}" - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Log into Vault and obtain an token - uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ etcd_vault_creds.username }}" - headers: - Accept: application/json - Content-Type: application/json - method: POST - body_format: json - body: - password: "{{ etcd_vault_creds.password }}" - register: etcd_vault_login_result - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Set fact for vault_client_token - set_fact: - 
vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" - run_once: true - -- name: gen_certs_vault | Set fact for Vault API token - set_fact: - etcd_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ vault_client_token }}" - run_once: true - when: vault_client_token != "" - # Issue master certs to Etcd nodes - include: ../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_alt_names: "{{ groups.etcd + ['localhost'] }}" issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}" issue_cert_file_group: "{{ etcd_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ etcd_vault_headers }}" issue_cert_hosts: "{{ groups.etcd }}" issue_cert_ip_sans: >- [ @@ -67,6 +29,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: etcd issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ etcd_vault_mount_path }}" with_items: "{{ etcd_master_certs_needed|d([]) }}" when: inventory_hostname in groups.etcd notify: set etcd_secret_changed @@ -74,11 +37,11 @@ # Issue node certs to everyone else - include: ../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_alt_names: "{{ etcd_node_cert_hosts }}" issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}" issue_cert_file_group: "{{ etcd_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ etcd_vault_headers }}" issue_cert_hosts: "{{ etcd_node_cert_hosts }}" issue_cert_ip_sans: >- [ @@ -93,8 +56,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: etcd issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ etcd_vault_mount_path }}" with_items: "{{ etcd_node_certs_needed|d([]) }}" when: inventory_hostname in etcd_node_cert_hosts notify: set etcd_secret_changed - - diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml index f87caeb4c..f7589e812 100644 --- a/roles/etcd/tasks/install_docker.yml +++ b/roles/etcd/tasks/install_docker.yml @@ -1,5 +1,5 @@ --- -#Plan A: no docker-py deps +# Plan A: no docker-py deps - name: Install | Copy etcdctl binary from docker container command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy; {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} && @@ -11,22 +11,3 @@ retries: 4 delay: "{{ retry_stagger | random + 3 }}" changed_when: false - -#Plan B: looks nicer, but requires docker-py on all hosts: -#- name: Install | Set up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: present -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" -# -#- name: Install | Copy etcdctl from etcd-binarycopy container -# command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl" -# when: etcd_deployment_type == "docker" -# -#- name: Install | Clean up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: absent -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index afd5fa883..3f8403570 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,8 +1,4 @@ --- -- include: pre_upgrade.yml - when: 
etcd_cluster_setup - tags: etcd-pre-upgrade - - include: check_certs.yml when: cert_management == "script" tags: [etcd-secrets, facts] @@ -10,6 +6,14 @@ - include: "gen_certs_{{ cert_management }}.yml" tags: etcd-secrets +- include: upd_ca_trust.yml + tags: etcd-secrets + +- name: "Gen_certs | Get etcd certificate serials" + shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2" + register: "node-{{ inventory_hostname }}_serial" + when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort + - include: "install_{{ etcd_deployment_type }}.yml" when: is_etcd_master tags: upgrade diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml deleted file mode 100644 index 0f171094a..000000000 --- a/roles/etcd/tasks/pre_upgrade.yml +++ /dev/null @@ -1,59 +0,0 @@ -- name: "Pre-upgrade | check for etcd-proxy unit file" - stat: - path: /etc/systemd/system/etcd-proxy.service - register: etcd_proxy_service_file - tags: facts - -- name: "Pre-upgrade | check for etcd-proxy init script" - stat: - path: /etc/init.d/etcd-proxy - register: etcd_proxy_init_script - tags: facts - -- name: "Pre-upgrade | stop etcd-proxy if service defined" - service: - name: etcd-proxy - state: stopped - when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False)) - -- name: "Pre-upgrade | remove etcd-proxy service definition" - file: - path: "{{ item }}" - state: absent - when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False)) - with_items: - - /etc/systemd/system/etcd-proxy.service - - /etc/init.d/etcd-proxy - -- name: "Pre-upgrade | find etcd-proxy container" - command: "{{ docker_bin_dir }}/docker ps -aq --filter 'name=etcd-proxy*'" - register: etcd_proxy_container - changed_when: false - failed_when: false - -- name: "Pre-upgrade | remove etcd-proxy if it exists" - command: "{{ docker_bin_dir }}/docker rm -f {{item}}" - with_items: "{{etcd_proxy_container.stdout_lines}}" - -- name: "Pre-upgrade | see if etcdctl is installed" - stat: - path: "{{ bin_dir }}/etcdctl" - register: etcdctl_installed - -- name: "Pre-upgrade | check if member list is non-SSL" - command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list" - register: etcd_member_list - retries: 10 - delay: 3 - until: etcd_member_list.rc != 2 - run_once: true - when: etcdctl_installed.stat.exists - changed_when: false - failed_when: false - -- name: "Pre-upgrade | change peer names to SSL" - shell: >- - {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list | - awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash - run_once: true - when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout' diff --git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml index e6f8186d3..0691d1df9 100644 --- a/roles/etcd/tasks/refresh_config.yml +++ b/roles/etcd/tasks/refresh_config.yml @@ -1,7 +1,7 @@ --- - name: Refresh config | Create etcd config file template: - src: etcd.env.yml + src: etcd.env.j2 dest: /etc/etcd.env notify: restart etcd when: is_etcd_master diff --git a/roles/etcd/tasks/sync_etcd_master_certs.yml 
b/roles/etcd/tasks/sync_etcd_master_certs.yml index 27ce303e9..d436c97f5 100644 --- a/roles/etcd/tasks/sync_etcd_master_certs.yml +++ b/roles/etcd/tasks/sync_etcd_master_certs.yml @@ -1,7 +1,7 @@ --- - name: sync_etcd_master_certs | Create list of master certs needing creation - set_fact: + set_fact: etcd_master_cert_list: >- {{ etcd_master_cert_list|default([]) + [ "admin-" + item + ".pem", @@ -11,7 +11,7 @@ run_once: true - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ groups.etcd }}" diff --git a/roles/etcd/tasks/sync_etcd_node_certs.yml b/roles/etcd/tasks/sync_etcd_node_certs.yml index 2f82dcffd..e535168fc 100644 --- a/roles/etcd/tasks/sync_etcd_node_certs.yml +++ b/roles/etcd/tasks/sync_etcd_node_certs.yml @@ -1,12 +1,12 @@ --- - name: sync_etcd_node_certs | Create list of node certs needing creation - set_fact: + set_fact: etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + item + '.pem'] }}" with_items: "{{ etcd_node_cert_hosts }}" - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ etcd_node_cert_hosts }}" @@ -24,7 +24,7 @@ sync_file_results: [] - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: ca.pem sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ etcd_node_cert_hosts }}" diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml new file mode 100644 index 000000000..81ce1e573 --- /dev/null +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -0,0 +1,27 @@ +--- +- name: Gen_certs | target ca-certificate store file + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/etcd-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/etcd-ca.crt + {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} + /etc/ssl/certs/etcd-ca.pem + {%- endif %} + tags: facts + +- name: Gen_certs | add CA to trusted CA dir + copy: + src: "{{ etcd_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + register: etcd_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) + command: update-ca-certificates + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + +- name: Gen_certs | update ca-certificates (RedHat) + command: update-ca-trust extract + when: etcd_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/etcd/templates/etcd.env.yml b/roles/etcd/templates/etcd.env.j2 similarity index 100% rename from roles/etcd/templates/etcd.env.yml rename to roles/etcd/templates/etcd.env.j2 diff --git a/roles/kernel-upgrade/defaults/main.yml b/roles/kernel-upgrade/defaults/main.yml index 8a1116785..688e6e018 100644 --- a/roles/kernel-upgrade/defaults/main.yml +++ b/roles/kernel-upgrade/defaults/main.yml @@ -1,9 +1,8 @@ --- - elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org' -elrepo_rpm : elrepo-release-7.0-3.el7.elrepo.noarch.rpm -elrepo_mirror : http://www.elrepo.org +elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm +elrepo_mirror: http://www.elrepo.org -elrepo_url : '{{elrepo_mirror}}/{{elrepo_rpm}}' +elrepo_url: '{{elrepo_mirror}}/{{elrepo_rpm}}' elrepo_kernel_package: "kernel-lt" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 
d42b2ffed..3665254da 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,5 +1,6 @@ +--- # Versions -kubedns_version : 1.14.2 +kubedns_version: 1.14.2 kubednsautoscaler_version: 1.1.1 # Limits for dnsmasq/kubedns apps @@ -37,6 +38,17 @@ netchecker_server_memory_limit: 256M netchecker_server_cpu_requests: 50m netchecker_server_memory_requests: 64M +# Dashboard +dashboard_enabled: false +dashboard_image_repo: kubernetesdashboarddev/kubernetes-dashboard-amd64 +dashboard_image_tag: head + +# Limits for dashboard +dashboard_cpu_limit: 100m +dashboard_memory_limit: 256M +dashboard_cpu_requests: 50m +dashboard_memory_requests: 64M + # SSL etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml new file mode 100644 index 000000000..63ea3cf70 --- /dev/null +++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Apps | Lay down dashboard template + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {file: dashboard.yml.j2, type: deploy, name: kubernetes-dashboard} + register: manifests + when: inventory_hostname == groups['kube-master'][0] + +- name: Kubernetes Apps | Start dashboard + kube: + name: "{{item.item.name}}" + namespace: "{{system_namespace}}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index e7bd934de..c2ffd7507 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver uri: - url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz + url: "{{ kube_apiserver_insecure_endpoint }}/healthz" register: result until: result.status == 200 retries: 10 @@ -14,12 +14,12 @@ dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {name: kubedns, file: kubedns-sa.yml, type: sa} - - {name: kubedns, file: kubedns-deploy.yml, type: deployment} + - {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment} - {name: kubedns, file: kubedns-svc.yml, type: svc} - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa} - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole} - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment} + - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment} register: manifests when: - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] @@ -51,13 +51,20 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + when: + - dns_mode != 'none' + - inventory_hostname == groups['kube-master'][0] + - not item|skipped tags: dnsmasq - name:
Kubernetes Apps | Netchecker include: tasks/netchecker.yml when: deploy_netchecker tags: netchecker + +- name: Kubernetes Apps | Dashboard + include: tasks/dashboard.yml + when: dashboard_enabled + tags: dashboard diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index 2d88b288c..4e91da224 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -1,3 +1,21 @@ +--- + +- name: Kubernetes Apps | Check if netchecker-server manifest already exists + stat: + path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2" + register: netchecker_server_manifest + tags: ['facts', 'upgrade'] + +- name: Kubernetes Apps | Apply netchecker-server manifest to update annotations + kube: + name: "netchecker-server" + namespace: "{{ netcheck_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "deploy" + state: latest + when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists + tags: upgrade + - name: Kubernetes Apps | Lay Down Netchecker Template template: src: "{{item.file}}" @@ -24,18 +42,6 @@ state: absent when: inventory_hostname == groups['kube-master'][0] -#FIXME: remove if kubernetes/features#124 is implemented -- name: Kubernetes Apps | Purge old Netchecker daemonsets - kube: - name: "{{item.item.name}}" - namespace: "{{netcheck_namespace}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.item.type}}" - filename: "{{kube_config_dir}}/{{item.item.file}}" - state: absent - with_items: "{{ manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and item.item.type == "ds" and item.changed - - name: Kubernetes Apps | Start Netchecker Resources kube: name: "{{item.item.name}}" @@ -43,7 +49,6 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 new file mode 100644 index 000000000..ac32b1c7f --- /dev/null +++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -0,0 +1,110 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy head version of the Dashboard UI compatible with +# Kubernetes 1.6 (RBAC enabled). 
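
# A minimal sketch (not part of this change) of enabling the dashboard from
# inventory group_vars, using only the defaults added in
# roles/kubernetes-apps/ansible/defaults/main.yml:
#
#   dashboard_enabled: true
#
# When rbac_enabled is true, the ServiceAccount declared below is bound to
# the built-in cluster-admin ClusterRole, so anyone who can reach the UI
# effectively holds cluster-admin; restrict access accordingly.
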
+# +# Example usage: kubectl create -f + +{% if rbac_enabled %} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + labels: + k8s-app: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ system_namespace }} +{% endif %} +--- +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }} + # Image is tagged and updated with :head, so always pull it. + imagePullPolicy: Always + resources: + limits: + cpu: {{ dashboard_cpu_limit }} + memory: {{ dashboard_memory_limit }} + requests: + cpu: {{ dashboard_cpu_requests }} + memory: {{ dashboard_memory_requests }} + ports: + - containerPort: 9090 + protocol: TCP + args: + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + livenessProbe: + httpGet: + path: / + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 +{% if rbac_enabled %} + serviceAccountName: kubernetes-dashboard +{% endif %} + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +spec: + ports: + - port: 80 + targetPort: 9090 + selector: + k8s-app: kubernetes-dashboard + diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml index a194426c6..f80d3d90c 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml index a368ae333..eb76f2d4e 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. 
All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml index 9544a7dd9..542ae86ce 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 similarity index 62% rename from roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml rename to roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index 9e0462290..df92ee615 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,26 +27,26 @@ spec: metadata: labels: k8s-app: kubedns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: autoscaler image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}" resources: - requests: - cpu: "20m" - memory: "10Mi" + requests: + cpu: "20m" + memory: "10Mi" command: - - /cluster-proportional-autoscaler - - --namespace={{ system_namespace }} - - --configmap=kubedns-autoscaler - # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - - --target=Deployment/kube-dns - - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - - --logtostderr=true - - --v=2 + - /cluster-proportional-autoscaler + - --namespace={{ system_namespace }} + - --configmap=kubedns-autoscaler + # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base + - --target=Deployment/kube-dns + - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} + - --logtostderr=true + - --v=2 {% if rbac_enabled %} serviceAccountName: cluster-proportional-autoscaler {% endif %} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 similarity index 98% rename from roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml rename to roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 index 7e4615676..682bdf491 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 @@ -1,3 +1,4 @@ +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: @@ -29,6 +30,8 @@ spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" + - effect: NoSchedule + operator: Exists volumes: - name: kube-dns-config configMap: diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml index e520ccbfc..f399fd6f4 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml @@ -1,3 +1,4 @@ +--- 
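
# How the autoscaler flags above render (a sketch, not shipped config):
# assuming kubedns_nodes_per_replica: 10 and kubedns_min_replicas: 2 (both
# existing kubespray variables; the values here are illustrative) and
# system_namespace resolving to kube-system, the container runs:
#
#   /cluster-proportional-autoscaler
#     --namespace=kube-system
#     --configmap=kubedns-autoscaler
#     --target=Deployment/kube-dns
#     --default-params={"linear":{"nodesPerReplica":10,"min":2}}
#     --logtostderr=true
#     --v=2
#
# i.e. one kube-dns replica per 10 nodes, never fewer than 2.
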
apiVersion: v1 kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml index 0565a01e8..1c4710db1 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: Service metadata: @@ -19,4 +20,3 @@ spec: - name: dns-tcp port: 53 protocol: TCP - diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index df0b8ba90..d73004242 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -12,6 +12,9 @@ spec: labels: app: netchecker-agent spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: netchecker-agent image: "{{ agent_img }}" @@ -37,3 +40,8 @@ spec: requests: cpu: {{ netchecker_agent_cpu_requests }} memory: {{ netchecker_agent_memory_requests }} + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index 10a74da84..70194c900 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -16,6 +16,9 @@ spec: {% if kube_version | version_compare('v1.6', '>=') %} dnsPolicy: ClusterFirstWithHostNet {% endif %} + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: netchecker-agent image: "{{ agent_img }}" @@ -41,3 +44,7 @@ spec: requests: cpu: {{ netchecker_agent_cpu_requests }} memory: {{ netchecker_agent_memory_requests }} + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 index c3dbf3cb5..6e2738e6f 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -25,12 +25,14 @@ spec: memory: {{ netchecker_server_memory_requests }} ports: - containerPort: 8081 - hostPort: 8081 args: - "-v=5" - "-logtostderr" - "-kubeproxyinit" - "-endpoint=0.0.0.0:8081" + tolerations: + - effect: NoSchedule + operator: Exists {% if rbac_enabled %} serviceAccountName: netchecker-server {% endif %} diff --git a/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml b/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml index e5af87425..d38ba6a6b 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml @@ -1,5 +1,5 @@ --- -elasticsearch_cpu_limit: 1000m +elasticsearch_cpu_limit: 1000m elasticsearch_mem_limit: 0M elasticsearch_cpu_requests: 100m elasticsearch_mem_requests: 0M diff --git a/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml b/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml index cd0a80606..3dc6f3ca1 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.elasticsearch }}" diff --git 
a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml index 7e3626571..8abbe2317 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml @@ -10,7 +10,7 @@ when: rbac_enabled - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)" - command: "kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}" with_items: - "efk-sa.yml" - "efk-clusterrolebinding.yml" @@ -38,4 +38,3 @@ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}" run_once: true when: es_service_manifest.changed - diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml index 2c11e566b..a5aba61ae 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml index b73c2a49d..e79e26be8 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml index eeb95b71a..e8d93732c 100644 --- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml @@ -1,5 +1,5 @@ --- -fluentd_cpu_limit: 0m +fluentd_cpu_limit: 0m fluentd_mem_limit: 200Mi fluentd_cpu_requests: 100m fluentd_mem_requests: 200Mi diff --git a/roles/kubernetes-apps/efk/fluentd/meta/main.yml b/roles/kubernetes-apps/efk/fluentd/meta/main.yml index 1ba777c76..0e1e03813 100644 --- a/roles/kubernetes-apps/efk/fluentd/meta/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.fluentd }}" diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml index 31b41412e..c91bf6827 100644 --- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml @@ -20,4 +20,3 @@ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}" run_once: true when: fluentd_ds_manifest.changed - diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 index 77ed3c4ff..2dc26991c 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 @@ -17,6 +17,9 @@ spec: kubernetes.io/cluster-service: "true" version: "v{{ fluentd_version }}" spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: fluentd-es image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}" @@ -55,4 +58,3 @@ spec: {% if rbac_enabled %} serviceAccountName: efk {% endif %} - diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml 
b/roles/kubernetes-apps/efk/kibana/defaults/main.yml index ad6215c93..baf07cdf2 100644 --- a/roles/kubernetes-apps/efk/kibana/defaults/main.yml +++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml @@ -1,5 +1,5 @@ --- -kibana_cpu_limit: 100m +kibana_cpu_limit: 100m kibana_mem_limit: 0M kibana_cpu_requests: 100m kibana_mem_requests: 0M diff --git a/roles/kubernetes-apps/efk/kibana/meta/main.yml b/roles/kubernetes-apps/efk/kibana/meta/main.yml index 34d0ab21a..775880d54 100644 --- a/roles/kubernetes-apps/efk/kibana/meta/main.yml +++ b/roles/kubernetes-apps/efk/kibana/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.kibana }}" diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml index 5e2b15f71..ea8568286 100644 --- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml +++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: "Kibana | Write Kibana deployment" - template: + template: src: kibana-deployment.yml.j2 dest: "{{ kube_config_dir }}/kibana-deployment.yaml" register: kibana_deployment_manifest @@ -12,12 +12,12 @@ name: "kibana-logging" namespace: "{{system_namespace}}" resource: "deployment" - state: "{{ item | ternary('latest','present') }}" + state: "latest" with_items: "{{ kibana_deployment_manifest.changed }}" run_once: true - name: "Kibana | Write Kibana service " - template: + template: src: kibana-service.yml.j2 dest: "{{ kube_config_dir }}/kibana-service.yaml" register: kibana_service_manifest @@ -29,6 +29,6 @@ name: "kibana-logging" namespace: "{{system_namespace}}" resource: "svc" - state: "{{ item | ternary('latest','present') }}" + state: "latest" with_items: "{{ kibana_service_manifest.changed }}" run_once: true diff --git a/roles/kubernetes-apps/efk/meta/main.yml b/roles/kubernetes-apps/efk/meta/main.yml index e11bbae29..550ba9497 100644 --- a/roles/kubernetes-apps/efk/meta/main.yml +++ b/roles/kubernetes-apps/efk/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: kubernetes-apps/efk/elasticsearch - role: kubernetes-apps/efk/fluentd diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml index b1b2dfca9..bb7ca244e 100644 --- a/roles/kubernetes-apps/helm/defaults/main.yml +++ b/roles/kubernetes-apps/helm/defaults/main.yml @@ -1,3 +1,4 @@ +--- helm_enabled: false # specify a dir and attach it to helm for HELM_HOME. 
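
Helm follows the same opt-in pattern as the dashboard: the role ships disabled and is gated on a single default. A minimal sketch of the inventory toggle, assuming only the helm_enabled variable added above (the group_vars path is illustrative):

```yaml
# inventory/group_vars/k8s-cluster.yml (illustrative path)
helm_enabled: true
```

With rbac_enabled also set, the tasks below lay down the tiller ServiceAccount and ClusterRoleBinding before running the Helm install/upgrade.
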
diff --git a/roles/kubernetes-apps/helm/meta/main.yml b/roles/kubernetes-apps/helm/meta/main.yml index 805439250..5092ec83b 100644 --- a/roles/kubernetes-apps/helm/meta/main.yml +++ b/roles/kubernetes-apps/helm/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.helm }}" diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 2d26c5a0f..d01211e2f 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -27,9 +27,8 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled - name: Helm | Install/upgrade helm diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml index 0ac9341ee..0c8db4c78 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml index c840f57f8..26e575fb6 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index c2dd39d73..9652e1a96 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.netcheck_server }}" diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml new file mode 100644 index 000000000..f17e45c7a --- /dev/null +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: Start Calico resources + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "latest" + with_items: "{{ calico_node_manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index f5ffc4393..cbe4f0ac7 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -1,32 +1,11 @@ -- name: Create canal ConfigMap - run_once: true +--- +- name: Canal | Start Resources kube: - name: "canal-config" + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/canal-config.yaml" - resource: "configmap" - namespace: "{{system_namespace}}" - -#FIXME: remove if kubernetes/features#124 is implemented -- name: Purge old flannel and canal-node - run_once: true - kube: - name: "canal-node" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ 
kube_config_dir }}/canal-node.yaml" - resource: "ds" - namespace: "{{system_namespace}}" - state: absent - when: inventory_hostname == groups['kube-master'][0] and canal_node_manifest.changed - -- name: Start flannel and calico-node - run_once: true - kube: - name: "canal-node" - kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/canal-node.yaml" - resource: "ds" - namespace: "{{system_namespace}}" - state: "{{ item | ternary('latest','present') }}" - with_items: "{{ canal_node_manifest.changed }}" - + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "latest" + with_items: "{{ canal_manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml new file mode 100644 index 000000000..607c7d617 --- /dev/null +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml" + run_once: true + when: rbac_enabled and flannel_rbac_manifest.changed + +- name: Flannel | Start Resources + kube: + name: "kube-flannel" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cni-flannel.yml" + resource: "ds" + namespace: "{{system_namespace}}" + state: "latest" + with_items: "{{ flannel_manifest.changed }}" + when: inventory_hostname == groups['kube-master'][0] + +- name: Flannel | Wait for flannel subnet.env file presence + wait_for: + path: /run/flannel/subnet.env + delay: 5 + timeout: 600 diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml index 43382f2ae..4df295ea4 100644 --- a/roles/kubernetes-apps/network_plugin/meta/main.yml +++ b/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -1,8 +1,14 @@ --- dependencies: - - role: kubernetes-apps/network_plugin/canal - when: kube_network_plugin == 'canal' - tags: canal - - role: kubernetes-apps/network_plugin/weave - when: kube_network_plugin == 'weave' - tags: weave + - role: kubernetes-apps/network_plugin/calico + when: kube_network_plugin == 'calico' + tags: calico + - role: kubernetes-apps/network_plugin/canal + when: kube_network_plugin == 'canal' + tags: canal + - role: kubernetes-apps/network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: flannel + - role: kubernetes-apps/network_plugin/weave + when: kube_network_plugin == 'weave' + tags: weave diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index 232f2d781..3b01d0e66 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -1,4 +1,5 @@ -#FIXME: remove if kubernetes/features#124 is implemented +--- +# FIXME: remove if kubernetes/features#124 is implemented - name: Weave | Purge old weave daemonset kube: name: "weave-net" @@ -9,7 +10,6 @@ state: absent when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed - - name: Weave | Start Resources kube: name: "weave-net" @@ -17,11 +17,9 @@ filename: "{{ kube_config_dir }}/weave-net.yml" resource: "ds" namespace: "{{system_namespace}}" - state: "{{ item | ternary('latest','present') }}" - with_items: "{{ weave_manifest.changed }}" + state: "latest" when: 
inventory_hostname == groups['kube-master'][0] - - name: "Weave | wait for weave to become available" uri: url: http://127.0.0.1:6784/status diff --git a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml index 7a4db0ea8..0e66359cc 100644 --- a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Limits for calico apps calico_policy_controller_cpu_limit: 100m calico_policy_controller_memory_limit: 256M @@ -7,3 +8,8 @@ calico_policy_controller_memory_requests: 64M # SSL calico_cert_dir: "/etc/calico/certs" canal_cert_dir: "/etc/canal/certs" + +rbac_resources: + - sa + - clusterrole + - clusterrolebinding diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 8b4271d6a..a6b1e18c1 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -1,21 +1,49 @@ -- set_fact: +--- +- name: Set cert dir + set_fact: calico_cert_dir: "{{ canal_cert_dir }}" when: kube_network_plugin == 'canal' tags: [facts, canal] -- name: Write calico-policy-controller yaml +- name: Get calico-policy-controller version if running + shell: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d':' -f2" + register: existing_calico_policy_version + run_once: true + failed_when: false + +# FIXME(mattymo): This should not be necessary +- name: Delete calico-policy-controller if an old one is installed + kube: + name: calico-policy-controller + kubectl: "{{bin_dir}}/kubectl" + resource: rs + namespace: "{{ system_namespace }}" + state: absent + run_once: true + when: + - not "NotFound" in existing_calico_policy_version.stderr + - existing_calico_policy_version.stdout | version_compare('v0.7.0', '<') + +- name: Create calico-policy-controller manifests template: - src: calico-policy-controller.yml.j2 - dest: "{{kube_config_dir}}/calico-policy-controller.yml" - when: inventory_hostname == groups['kube-master'][0] - tags: canal + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-policy-controller, file: calico-policy-controller.yml, type: rs} + - {name: calico-policy-controller, file: calico-policy-sa.yml, type: sa} + - {name: calico-policy-controller, file: calico-policy-cr.yml, type: clusterrole} + - {name: calico-policy-controller, file: calico-policy-crb.yml, type: clusterrolebinding} + register: calico_policy_manifests + when: + - rbac_enabled or item.type not in rbac_resources - name: Start of Calico policy controller kube: - name: "calico-policy-controller" + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/calico-policy-controller.yml" - namespace: "{{system_namespace}}" - resource: "rs" - when: inventory_hostname == groups['kube-master'][0] - tags: canal + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "latest" + with_items: "{{ calico_policy_manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 
b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 index 322d3a37b..ca1711463 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 @@ -15,12 +15,18 @@ spec: template: metadata: name: calico-policy-controller - namespace: {{system_namespace}} + namespace: {{ system_namespace }} labels: kubernetes.io/cluster-service: "true" k8s-app: calico-policy spec: hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: calico-policy-controller +{% endif %} + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: calico-policy-controller image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 new file mode 100644 index 000000000..aac341ca6 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 @@ -0,0 +1,17 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + verbs: + - watch + - list diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 new file mode 100644 index 000000000..d5c192018 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-policy-controller +subjects: +- kind: ServiceAccount + name: calico-policy-controller + namespace: {{ system_namespace }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 new file mode 100644 index 000000000..c6bc07fbb --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 7cfe9cc9a..979622731 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -1,3 +1,4 @@ +--- # An experimental dev/test only dynamic volumes provisioner, # for PetSets. Works for kube>=v1.3 only. kube_hostpath_dynamic_provisioner: "false" @@ -52,14 +53,14 @@ kube_oidc_auth: false ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) -#kube_oidc_url: https:// ... +# kube_oidc_url: https:// ... 
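
# An illustrative ad-hoc check (a sketch, not shipped by any role) that the
# policy controller above was re-created under its new ServiceAccount once
# RBAC is enabled; bin_dir and system_namespace are the same variables the
# surrounding tasks already use:
#
#   - name: Check calico-policy-controller service account
#     command: >-
#       {{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller
#       -o jsonpath='{.spec.template.spec.serviceAccountName}'
#     register: policy_sa
#     changed_when: false
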
# kube_oidc_client_id: kubernetes ## Optional settings for OIDC # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem # kube_oidc_username_claim: sub # kube_oidc_groups_claim: groups -##Variables for custom flags +## Variables for custom flags apiserver_custom_flags: [] controller_mgr_custom_flags: [] diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index e408ce04e..d6034aeb2 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -39,7 +39,7 @@ - name: Master | wait for the apiserver to be running uri: - url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz + url: "{{ kube_apiserver_insecure_endpoint }}/healthz" register: result until: result.status == 200 retries: 20 diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 6922e6a51..452463118 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -85,7 +85,3 @@ dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" notify: Master | wait for kube-controller-manager tags: kube-controller-manager - -- include: post-upgrade.yml - tags: k8s-post-upgrade - diff --git a/roles/kubernetes/master/tasks/post-upgrade.yml b/roles/kubernetes/master/tasks/post-upgrade.yml deleted file mode 100644 index 221bf542d..000000000 --- a/roles/kubernetes/master/tasks/post-upgrade.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: "Post-upgrade | stop kubelet on all masters" - service: - name: kubelet - state: stopped - delegate_to: "{{item}}" - with_items: "{{groups['kube-master']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Post-upgrade | Pause for kubelet stop" - pause: - seconds: 10 - when: needs_etcd_migration|bool - -- name: "Post-upgrade | start kubelet on all masters" - service: - name: kubelet - state: started - delegate_to: "{{item}}" - with_items: "{{groups['kube-master']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Post-upgrade | etcd3 upgrade | purge etcd2 k8s data" - command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} rm -r /registry" - environment: - ETCDCTL_API: 2 - delegate_to: "{{groups['etcd'][0]}}" - run_once: true - when: kube_apiserver_storage_backend == "etcd3" and needs_etcd_migration|bool|default(false) diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 604659279..7cd650cbd 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -1,38 +1,4 @@ --- -- name: "Pre-upgrade | check for kube-apiserver unit file" - stat: - path: /etc/systemd/system/kube-apiserver.service - register: kube_apiserver_service_file - tags: [facts, kube-apiserver] - -- name: "Pre-upgrade | check for kube-apiserver init script" - stat: - path: /etc/init.d/kube-apiserver - register: kube_apiserver_init_script - tags: [facts, kube-apiserver] - -- name: "Pre-upgrade | stop kube-apiserver if service defined" - service: - name: kube-apiserver - state: stopped - when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False)) - tags: kube-apiserver - -- name: "Pre-upgrade | remove kube-apiserver service definition" - file: - path: "{{ item }}" - state: absent - when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False)) - with_items: - - /etc/systemd/system/kube-apiserver.service - - 
/etc/init.d/kube-apiserver - tags: kube-apiserver - -- name: "Pre-upgrade | See if kube-apiserver manifest exists" - stat: - path: /etc/kubernetes/manifests/kube-apiserver.manifest - register: kube_apiserver_manifest - - name: "Pre-upgrade | etcd3 upgrade | see if old config exists" command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions" environment: @@ -47,19 +13,6 @@ kube_apiserver_storage_backend: "etcd2" when: old_data_exists.rc == 0 and not force_etcd3|bool -- name: "Pre-upgrade | etcd3 upgrade | see if data was already migrated" - command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} get --limit=1 --prefix=true /registry/minions" - environment: - ETCDCTL_API: 3 - register: data_migrated - delegate_to: "{{groups['etcd'][0]}}" - when: kube_apiserver_storage_backend == "etcd3" - failed_when: false - -- name: "Pre-upgrade | etcd3 upgrade | set needs_etcd_migration" - set_fact: - needs_etcd_migration: "{{ force_etcd3|default(false) and kube_apiserver_storage_backend == 'etcd3' and data_migrated.stdout_lines|length == 0 and old_data_exists.rc == 0 }}" - - name: "Pre-upgrade | Delete master manifests on all kube-masters" file: path: "/etc/kubernetes/manifests/{{item[1]}}.manifest" @@ -69,7 +22,7 @@ - "{{groups['kube-master']}}" - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] register: kube_apiserver_manifest_replaced - when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists + when: (secret_changed|default(false) or etcd_secret_changed|default(false)) - name: "Pre-upgrade | Delete master containers forcefully on all kube-masters" shell: "docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f" @@ -77,34 +30,5 @@ with_nested: - "{{groups['kube-master']}}" - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] - register: kube_apiserver_manifest_replaced - when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | stop etcd" - service: - name: etcd - state: stopped - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | migrate data" - command: "{{ bin_dir }}/etcdctl migrate --data-dir=\"{{ etcd_data_dir }}\" --wal-dir=\"{{ etcd_data_dir }}/member/wal\"" - environment: - ETCDCTL_API: 3 - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - register: etcd_migrated - when: needs_etcd_migration|bool - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | start etcd" - service: - name: etcd - state: started - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - when: needs_etcd_migration|bool + when: kube_apiserver_manifest_replaced.changed run_once: true diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 24094fefb..f5dec5589 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -6,6 +6,9 @@ metadata: labels: k8s-app: kube-apiserver kubespray: v2 + annotations: + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}" spec: hostNetwork: true {% if kube_version | 
version_compare('v1.6', '>=') %} @@ -84,6 +87,9 @@ spec: {% if authorization_modes %} - --authorization-mode={{ authorization_modes|join(',') }} {% endif %} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} +{% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} {% else %} @@ -102,9 +108,14 @@ spec: - mountPath: {{ kube_config_dir }} name: kubernetes-config readOnly: true - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: {{ etcd_cert_dir }} name: etcd-certs readOnly: true @@ -117,9 +128,14 @@ spec: - hostPath: path: {{ kube_config_dir }} name: kubernetes-config - - hostPath: - path: /etc/ssl/certs/ - name: ssl-certs-host + - name: ssl-certs-host + hostPath: + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - hostPath: path: {{ etcd_cert_dir }} name: etcd-certs diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index a6b69fa14..e0ef08fe4 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -5,6 +5,9 @@ metadata: namespace: {{system_namespace}} labels: k8s-app: kube-controller + annotations: + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} @@ -45,9 +48,15 @@ spec: - --cloud-provider={{cloud_provider}} {% endif %} {% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} - - --allocate-node-cidrs=true - --configure-cloud-routes=true +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel"] %} + - --allocate-node-cidrs=true - --cluster-cidr={{ kube_pods_subnet }} + - --service-cluster-ip-range={{ kube_service_addresses }} +{% endif %} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if controller_mgr_custom_flags is string %} - {{ controller_mgr_custom_flags }} @@ -64,9 +73,14 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: "{{kube_config_dir}}/ssl" name: etc-kube-ssl readOnly: true @@ -81,11 +95,12 @@ spec: volumes: - name: ssl-certs-host hostPath: -{% if ansible_os_family == 'RedHat' %} - path: /etc/pki/tls -{% else %} - path: /usr/share/ca-certificates -{% endif %} + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - name: etc-kube-ssl hostPath: path: "{{ kube_config_dir }}/ssl" diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index fdc16bf7f..6353ca102 100644 --- 
a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -5,6 +5,8 @@ metadata: namespace: {{ system_namespace }} labels: k8s-app: kube-scheduler + annotations: + kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} @@ -27,6 +29,9 @@ spec: - --leader-elect=true - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --v={{ kube_log_level }} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} +{% endif %} {% if scheduler_custom_flags is string %} - {{ scheduler_custom_flags }} {% else %} @@ -42,9 +47,14 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: "{{ kube_config_dir }}/ssl" name: etc-kube-ssl readOnly: true @@ -54,11 +64,12 @@ spec: volumes: - name: ssl-certs-host hostPath: -{% if ansible_os_family == 'RedHat' %} - path: /etc/pki/tls -{% else %} - path: /usr/share/ca-certificates -{% endif %} + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - name: etc-kube-ssl hostPath: path: "{{ kube_config_dir }}/ssl" diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 6e2ff835f..940bdfff4 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Valid options: docker (default), rkt, or host kubelet_deployment_type: host @@ -49,7 +50,7 @@ kube_apiserver_node_port_range: "30000-32767" kubelet_load_modules: false -##Support custom flags to be passed to kubelet +## Support custom flags to be passed to kubelet kubelet_custom_flags: [] # This setting is used for rkt based kubelet for deploying hyperkube diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml index 12a7d73b7..f0656e571 100644 --- a/roles/kubernetes/node/meta/main.yml +++ b/roles/kubernetes/node/meta/main.yml @@ -6,6 +6,10 @@ dependencies: - role: download file: "{{ downloads.pod_infra }}" tags: [download, kubelet] + - role: download + file: "{{ downloads.install_socat }}" + tags: [download, kubelet] + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] - role: kubernetes/secrets tags: k8s-secrets - role: download @@ -33,4 +37,4 @@ dependencies: tags: [download, dnsmasq] - role: download file: "{{ downloads.kubednsautoscaler }}" - tags: [download, dnsmasq] \ No newline at end of file + tags: [download, dnsmasq] diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index ad4cbacf1..692f8247c 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -21,4 +21,3 @@ dest: "/etc/systemd/system/kubelet.service" backup: "yes" notify: restart kubelet - diff --git a/roles/kubernetes/node/tasks/install_host.yml b/roles/kubernetes/node/tasks/install_host.yml index e80b20498..9f1523ffe 100644 --- a/roles/kubernetes/node/tasks/install_host.yml +++ b/roles/kubernetes/node/tasks/install_host.yml @@ -8,3 +8,9 @@ changed_when: false tags: [hyperkube, upgrade] notify: restart kubelet + +- 
name: install | Copy socat wrapper for Container Linux + command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}" + args: + creates: "{{ bin_dir }}/socat" + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] diff --git a/roles/kubernetes/node/tasks/install_rkt.yml b/roles/kubernetes/node/tasks/install_rkt.yml index 68e90860c..d19b099bd 100644 --- a/roles/kubernetes/node/tasks/install_rkt.yml +++ b/roles/kubernetes/node/tasks/install_rkt.yml @@ -20,8 +20,8 @@ path: /var/lib/kubelet - name: Create kubelet service systemd directory - file: - path: /etc/systemd/system/kubelet.service.d + file: + path: /etc/systemd/system/kubelet.service.d state: directory - name: Write kubelet proxy drop-in @@ -30,4 +30,3 @@ dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf when: http_proxy is defined or https_proxy is defined or no_proxy is defined notify: restart kubelet - diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index e0558f8cd..d166fe661 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -49,6 +49,37 @@ when: kube_apiserver_node_port_range is defined tags: kube-proxy +- name: Verify if br_netfilter module exists + shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + +- name: Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled; when br_netfilter is built into the kernel rather than a loadable module, set the sysctl directly if the key exists +- name: Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + register: sysctl_bridge_nf_call_iptables + +- name: Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + value: 1 + reload: yes + when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + - name: Write proxy manifest template: src: manifests/kube-proxy.manifest.j2 diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2 index cf79f6fa4..16d8a63d1 100644 --- a/roles/kubernetes/node/templates/kubelet.docker.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2 @@ -1,13 +1,8 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service docker.socket calico-node.service -Wants=docker.socket calico-node.service -{% else %} After=docker.service Wants=docker.socket -{% endif %} [Service] EnvironmentFile={{kube_config_dir}}/kubelet.env diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2 index 71a9da8c3..ec5e3d524 100644 --- a/roles/kubernetes/node/templates/kubelet.host.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.host.service.j2 @@ -1,13 +1,8 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service docker.socket calico-node.service
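
# An illustrative one-off verification (a sketch, not part of this change)
# that the br_netfilter/sysctl tasks above took effect; kube-proxy relies on
# this key being 1 for bridged pod traffic to traverse iptables:
#
#   - name: Verify bridge-nf-call-iptables is enabled
#     command: sysctl -n net.bridge.bridge-nf-call-iptables
#     register: bridge_nf_call
#     changed_when: false
#     failed_when: bridge_nf_call.stdout != "1"
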
-Wants=docker.socket calico-node.service -{% else %} After=docker.service Wants=docker.socket -{% endif %} [Service] EnvironmentFile={{kube_config_dir}}/kubelet.env diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 822153f39..3240b5611 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -36,8 +36,14 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %} {% if standalone_kubelet|bool %} {# We are on a master-only host. Make the master unschedulable in this case. #} +{% if kube_version | version_compare('v1.6', '>=') %} +{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #} +{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %} +{% else %} +{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #} {% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %} {% endif %} +{% endif %} {# Kubelet node labels #} {% if inventory_hostname in groups['kube-master'] %} @@ -49,14 +55,13 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} {% endif %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" -{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %} +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock" {% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %} -# Please note that --reconcile-cidr is deprecated and a no-op in Kubernetes 1.5 but still required in 1.4 -KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet --reconcile-cidr=true" +KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" {% endif %} # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=true" @@ -67,3 +72,5 @@ KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}" {% else %} KUBELET_CLOUDPROVIDER="" {% endif %} + +PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 1f181a89d..522f58d8c 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ 
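The `kubelet.j2` hunk above switches master-only hosts from `--register-schedulable=false` to the kubeadm-style master taint on Kubernetes >= 1.6 (`--register-with-taints` only exists from 1.6 on), and `KUBELET_ARGS` now also renders `--feature-gates` from the joined `kube_feature_gates` list. On a 1.6+ master the relevant fragment comes out roughly as below; this is a sketch assuming the default `kube_config_dir` of `/etc/kubernetes` and a hypothetical gate value. Note also that with `weave` now included in the CNI plugin list, the `{% elif ... == "weave" %}` branch kept as context below it becomes unreachable.

```sh
--kubeconfig=/etc/kubernetes/node-kubeconfig.yaml --require-kubeconfig \
--register-with-taints=node-role.kubernetes.io/master=:NoSchedule \
--feature-gates=Accelerators=true
```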
b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -1,12 +1,7 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=calico-node.service -Wants=network.target calico-node.service -{% else %} Wants=network.target -{% endif %} [Service] Restart=on-failure @@ -32,7 +27,7 @@ ExecStart=/usr/bin/rkt run \ --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \ --volume var-log,kind=host,source=/var/log \ -{% if kube_network_plugin in ["calico", "weave", "canal"] %} +{% if kube_network_plugin in ["calico", "weave", "canal", "flannel"] %} --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \ --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \ diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 65feeee65..daf0fcb4f 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -5,6 +5,8 @@ metadata: namespace: {{system_namespace}} labels: k8s-app: kube-proxy + annotations: + kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index 35fec7d94..dab1bf7de 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -1,3 +1,4 @@ +--- - name: Preinstall | restart network command: /bin/true notify: diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml index ca50d5843..fa2d82fd2 100644 --- a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml +++ b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml @@ -48,5 +48,3 @@ fail: msg: "azure_route_table_name is missing" when: azure_route_table_name is not defined or azure_route_table_name == "" - - diff --git a/roles/kubernetes/preinstall/tasks/gitinfos.yml b/roles/kubernetes/preinstall/tasks/gitinfos.yml deleted file mode 100644 index 323c0babf..000000000 --- a/roles/kubernetes/preinstall/tasks/gitinfos.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# Deploy git infos -# ---------------- -- name: 'GIT | Install script for collecting git info' - template: - src: "{{ role_path }}/gen-gitinfos.sh" - dest: "{{ bin_dir }}/gen-gitinfos.sh" - mode: a+rwx - -- name: 'GIT | generate git informations' - local_action: command {{ role_path }}/gen-gitinfos.sh global - register: gitinfo - check_mode: no - -- name: 'GIT | copy ansible information' - template: - src: ansible_git.j2 - dest: /etc/.ansible.ini - backup: yes - -- name: 'GIT | generate diff file' - local_action: command {{ role_path }}/gen-gitinfos.sh diff - register: gitdiff - check_mode: no - -- name: 'GIT | copy git diff file' - copy: - content: "{{ gitdiff.stdout }}" - dest: /etc/.git-ansible.diff - backup: yes diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index e3f27192f..620aae35f 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ 
-1,6 +1,6 @@ --- - include: pre-upgrade.yml - tags: [upgrade, bootstrap-os] + tags: [upgrade, bootstrap-os] - name: Force binaries directory for Container Linux by CoreOS set_fact: @@ -16,10 +16,6 @@ become: true tags: bootstrap-os -- include: gitinfos.yml - when: run_gitinfos - tags: facts - - include: set_facts.yml tags: facts @@ -27,14 +23,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts @@ -85,7 +81,7 @@ - "/etc/cni/net.d" - "/opt/cni/bin" when: - - kube_network_plugin in ["calico", "weave", "canal"] + - kube_network_plugin in ["calico", "weave", "canal", "flannel"] - inventory_hostname in groups['k8s-cluster'] tags: [network, calico, weave, canal, bootstrap-os] diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 056f9edcf..96ec25499 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -32,6 +32,10 @@ {%- endif -%} {%- endif %} +- set_fact: + kube_apiserver_insecure_endpoint: >- + http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }} + - set_fact: etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}" diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml index b91726d50..9beeb6b50 100644 --- a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml +++ b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml @@ -1,3 +1,4 @@ +--- - name: check vsphere environment variables fail: msg: "{{ item.name }} is missing" diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml index c1be4b9b3..b2fbcd80a 100644 --- a/roles/kubernetes/preinstall/vars/centos.yml +++ b/roles/kubernetes/preinstall/vars/centos.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git a/roles/kubernetes/preinstall/vars/debian.yml b/roles/kubernetes/preinstall/vars/debian.yml index 596d2ac8b..dfcb0bc34 100644 --- a/roles/kubernetes/preinstall/vars/debian.yml +++ b/roles/kubernetes/preinstall/vars/debian.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - python-apt - aufs-tools diff --git a/roles/kubernetes/preinstall/vars/fedora.yml b/roles/kubernetes/preinstall/vars/fedora.yml index c1be4b9b3..b2fbcd80a 100644 --- a/roles/kubernetes/preinstall/vars/fedora.yml +++ b/roles/kubernetes/preinstall/vars/fedora.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml index c1be4b9b3..b2fbcd80a 
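The new `kube_apiserver_insecure_endpoint` fact above rewrites a wildcard insecure bind address to loopback, so consumers always receive a URL they can actually dial. A quick way to inspect the derived fact (sketch):

```yaml
- debug:
    msg: "{{ kube_apiserver_insecure_endpoint }}"
  # bind 127.0.0.1, port 8080 -> http://127.0.0.1:8080
  # bind 0.0.0.0,   port 8080 -> http://127.0.0.1:8080
  #   (0.0.0.0 is listenable but not dialable, hence the regex_replace)
```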
100644 --- a/roles/kubernetes/preinstall/vars/redhat.yml +++ b/roles/kubernetes/preinstall/vars/redhat.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml index e6177857e..f0d10711d 100644 --- a/roles/kubernetes/secrets/defaults/main.yml +++ b/roles/kubernetes/secrets/defaults/main.yml @@ -1,2 +1,3 @@ --- kube_cert_group: kube-cert +kube_vault_mount_path: kube diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 0cb7e37c6..09342625d 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -82,10 +82,13 @@ gen_key_and_cert() { # Admins if [ -n "$MASTERS" ]; then - # If any host requires new certs, just regenerate all master certs # kube-apiserver - gen_key_and_cert "apiserver" "/CN=kube-apiserver" - cat ca.pem >> apiserver.pem + # Generate only if we don't have existing ca and apiserver certs + if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then + gen_key_and_cert "apiserver" "/CN=kube-apiserver" + cat ca.pem >> apiserver.pem + fi + # If any host requires new certs, just regenerate scheduler and controller-manager master certs # kube-scheduler gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler" # kube-controller-manager diff --git a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 69b82d957..3870a3e96 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -105,4 +105,3 @@ {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} - diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 80fb4a506..192787b97 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -56,26 +56,25 @@ - set_fact: all_master_certs: "['ca-key.pem', + 'apiserver.pem', + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', + {% for node in groups['kube-master'] %} + 'admin-{{ node }}.pem', + 'admin-{{ node }}-key.pem', + {% endfor %}]" + my_master_certs: ['ca-key.pem', + 'admin-{{ inventory_hostname }}.pem', + 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', 'apiserver-key.pem', 'kube-scheduler.pem', 'kube-scheduler-key.pem', 'kube-controller-manager.pem', - 'kube-controller-manager-key.pem', - {% for node in groups['kube-master'] %} - 'admin-{{ node }}.pem', - 'admin-{{ node }}-key.pem', - {% endfor %}]" - my_master_certs: ['ca-key.pem', - 'admin-{{ inventory_hostname }}.pem', - 'admin-{{ inventory_hostname }}-key.pem', - 'apiserver.pem', - 'apiserver-key.pem', - 'kube-scheduler.pem', - 'kube-scheduler-key.pem', - 'kube-controller-manager.pem', - 'kube-controller-manager-key.pem', - ] + 'kube-controller-manager-key.pem'] all_node_certs: "['ca.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', @@ -84,11 +83,10 @@ 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" my_node_certs: ['ca.pem', - 'node-{{ inventory_hostname }}.pem', - 'node-{{ inventory_hostname }}-key.pem', - 'kube-proxy-{{ inventory_hostname }}.pem', - 'kube-proxy-{{ inventory_hostname }}-key.pem', - ] + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem', + 'kube-proxy-{{ 
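The `make-ssl.sh` change above makes apiserver cert generation conditional, so repeated runs stop rotating the apiserver cert (and re-appending the CA) unless a key is actually missing; scheduler and controller-manager certs are still regenerated whenever any host needs new certs. The guard in isolation (sketch; `$SSLDIR` is the cert directory the script already uses):

```sh
if [ ! -e "$SSLDIR/ca-key.pem" ] || [ ! -e "$SSLDIR/apiserver-key.pem" ]; then
  gen_key_and_cert "apiserver" "/CN=kube-apiserver"
  cat ca.pem >> apiserver.pem   # bundle the CA so clients get the full chain
fi
```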
inventory_hostname }}.pem', + 'kube-proxy-{{ inventory_hostname }}-key.pem'] tags: facts - name: Gen_certs | Gather master certs @@ -114,10 +112,10 @@ sync_certs|default(false) and inventory_hostname != groups['kube-master'][0] -#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k -#char limit when using shell command +# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k +# char limit when using shell command -#FIXME(mattymo): Use tempfile module in ansible 2.3 +# FIXME(mattymo): Use tempfile module in ansible 2.3 - name: Gen_certs | Prepare tempfile for unpacking certs shell: mktemp /tmp/certsXXXXX.tar.gz register: cert_tempfile @@ -168,31 +166,3 @@ owner: kube mode: "u=rwX,g-rwx,o-rwx" recurse: yes - -- name: Gen_certs | target ca-certificates path - set_fact: - ca_cert_path: |- - {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/kube-ca.crt - {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/kube-ca.crt - {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/kube-ca.pem - {%- endif %} - tags: facts - -- name: Gen_certs | add CA to trusted CA dir - copy: - src: "{{ kube_cert_dir }}/ca.pem" - dest: "{{ ca_cert_path }}" - remote_src: true - register: kube_ca_cert - -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) - command: update-ca-certificates - when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - -- name: Gen_certs | update ca-certificates (RedHat) - command: update-ca-trust extract - when: kube_ca_cert.changed and ansible_os_family == "RedHat" - diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index e516db0f2..4c5dc2eaa 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -1,68 +1,48 @@ --- - include: sync_kube_master_certs.yml when: inventory_hostname in groups['kube-master'] - tags: k8s-secrets - include: sync_kube_node_certs.yml when: inventory_hostname in groups['k8s-cluster'] - tags: k8s-secrets -- name: gen_certs_vault | Read in the local credentials - command: cat /etc/vault/roles/kube/userpass - register: kube_vault_creds_cat - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Set facts for read Vault Creds - set_fact: - kube_vault_creds: "{{ kube_vault_creds_cat.stdout|from_json }}" - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Log into Vault and obtain an token - uri: - url: "{{ hostvars[groups['vault'][0]]['vault_leader_url'] }}/v1/auth/userpass/login/{{ kube_vault_creds.username }}" - headers: - Accept: application/json - Content-Type: application/json - method: POST - body_format: json - body: - password: "{{ kube_vault_creds.password }}" - register: kube_vault_login_result - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Set fact for Vault API token - set_fact: - kube_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ kube_vault_login_result.get('json',{}).get('auth', {}).get('client_token') }}" - run_once: true - -# Issue certs to kube-master nodes +# Issue admin certs to kube-master hosts - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_copy_ca: "{{ item == kube_master_certs_needed|first }}" + issue_cert_common_name: "admin" + issue_cert_copy_ca: "{{ 
item == kube_admin_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['kube-master'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-master issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" - with_items: "{{ kube_master_certs_needed|d([]) }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" + with_items: "{{ kube_admin_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] +- name: gen_certs_vault | Set fact about certificate alt names + set_fact: + kube_cert_alt_names: >- + {{ + groups['kube-master'] + + ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] + + ['localhost'] + }} + run_once: true + +- name: gen_certs_vault | Add external load balancer domain name to certificate alt names + set_fact: + kube_cert_alt_names: "{{ kube_cert_alt_names + [apiserver_loadbalancer_domain_name] }}" + when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined + run_once: true + +# Issue master components certs to kube-master hosts - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_alt_names: >- - {{ - groups['kube-master'] + - ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] + - ['localhost'] - }} + issue_cert_common_name: "kubernetes" + issue_cert_alt_names: "{{ kube_cert_alt_names }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['kube-master'] }}" issue_cert_ip_sans: >- [ @@ -75,34 +55,41 @@ "127.0.0.1","::1","{{ kube_apiserver_ip }}" ] issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-master issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_master_components_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] + notify: set secret_changed # Issue node certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: + # Need to strip out the 'node-' prefix from the cert name so it can be used + # with the node authorization plugin ( CN matches kubelet node name ) + issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}" issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['k8s-cluster'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-node issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_node_certs_needed|d([]) }}" when: inventory_hostname in groups['k8s-cluster'] +# Issue proxy certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "system:kube-proxy" issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['k8s-cluster'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: 
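The `issue_cert_common_name` expression for node certs above exists because the Node authorization plugin matches on `CN == system:node:<nodeName>`; the filter chain reduces a cert path to the bare node name. Worked through with a hypothetical path:

```jinja
{# item = '/etc/kubernetes/ssl/node-worker1.pem'
   .rsplit('/', 1)[1]             -> 'node-worker1.pem'  (basename)
   .rsplit('.', 1)[0]             -> 'node-worker1'      (drop extension)
   | regex_replace('^node-', '')  -> 'worker1'           (strip prefix)
   => CN = 'system:node:worker1'                          #}
system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}
```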
kube-proxy issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_proxy_certs_needed|d([]) }}" when: inventory_hostname in groups['k8s-cluster'] diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index 5f55b775b..97987f706 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -31,7 +31,7 @@ src: known_users.csv.j2 dest: "{{ kube_users_dir }}/known_users.csv" backup: yes - when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true) + when: inventory_hostname in groups['kube-master'] and kube_basic_auth|default(true) notify: set secret_changed # @@ -72,5 +72,40 @@ - include: "gen_certs_{{ cert_management }}.yml" tags: k8s-secrets +- include: upd_ca_trust.yml + tags: k8s-secrets + +- name: "Gen_certs | Get certificate serials on kube masters" + shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2" + register: "master_certificate_serials" + with_items: + - "admin-{{ inventory_hostname }}.pem" + - "apiserver.pem" + - "kube-controller-manager.pem" + - "kube-scheduler.pem" + when: inventory_hostname in groups['kube-master'] + +- name: "Gen_certs | set kube master certificate serial facts" + set_fact: + etcd_admin_cert_serial: "{{ master_certificate_serials.results[0].stdout|default() }}" + apiserver_cert_serial: "{{ master_certificate_serials.results[1].stdout|default() }}" + controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}" + scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}" + when: inventory_hostname in groups['kube-master'] + +- name: "Gen_certs | Get certificate serials on kube nodes" + shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2" + register: "node_certificate_serials" + with_items: + - "node-{{ inventory_hostname }}.pem" + - "kube-proxy-{{ inventory_hostname }}.pem" + when: inventory_hostname in groups['k8s-cluster'] + +- name: "Gen_certs | set kube node certificate serial facts" + set_fact: + etcd_node_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}" + kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}" + when: inventory_hostname in groups['k8s-cluster'] + - include: gen_tokens.yml tags: k8s-secrets diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml index 6fa861a36..277038612 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml @@ -2,7 +2,7 @@ - name: sync_kube_master_certs | Create list of needed kube admin certs set_fact: - kube_master_cert_list: "{{ kube_master_cert_list|d([]) + ['admin-' + item + '.pem'] }}" + kube_admin_cert_list: "{{ kube_admin_cert_list|d([]) + ['admin-' + item + '.pem'] }}" with_items: "{{ groups['kube-master'] }}" - include: ../../../vault/tasks/shared/sync_file.yml @@ -13,11 +13,11 @@ sync_file_hosts: "{{ groups['kube-master'] }}" sync_file_is_cert: true sync_file_owner: kube - with_items: "{{ kube_master_cert_list|d([]) }}" + with_items: "{{ kube_admin_cert_list|d([]) }}" - name: sync_kube_master_certs | Set facts for kube admin sync_file results set_fact: - kube_master_certs_needed: "{{ kube_master_certs_needed|default([]) + [item.path] }}" + kube_admin_certs_needed: "{{ 
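The serial-gathering tasks above feed the `kubespray.*-cert/serial` annotations added to the static pod manifests earlier in this diff: reissuing a cert changes its serial, the rendered manifest changes, and the kubelet restarts the static pod. (The first facts are, somewhat confusingly, named `etcd_admin_cert_serial`/`etcd_node_cert_serial` even though they hold kube cert serials.) The underlying command can be run by hand; this sketch assumes the default `kube_cert_dir` of `/etc/kubernetes/ssl`:

```sh
# prints only the serial, e.g. A1B2C3D4E5F60708
openssl x509 -in /etc/kubernetes/ssl/apiserver.pem -noout -serial | cut -d= -f2
```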
kube_admin_certs_needed|default([]) + [item.path] }}" with_items: "{{ sync_file_results|d([]) }}" when: item.no_srcs|bool diff --git a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml index b97b85e17..7aafab5c8 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml @@ -6,7 +6,7 @@ with_items: "{{ groups['k8s-cluster'] }}" - include: ../../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ kube_cert_dir }}" sync_file_group: "{{ kube_cert_group }}" @@ -26,7 +26,7 @@ sync_file_results: [] - include: ../../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: ca.pem sync_file_dir: "{{ kube_cert_dir }}" sync_file_group: "{{ kube_cert_group }}" diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml new file mode 100644 index 000000000..c980bb6aa --- /dev/null +++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml @@ -0,0 +1,27 @@ +--- +- name: Gen_certs | target ca-certificates path + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/kube-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/kube-ca.crt + {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} + /etc/ssl/certs/kube-ca.pem + {%- endif %} + tags: facts + +- name: Gen_certs | add CA to trusted CA dir + copy: + src: "{{ kube_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + register: kube_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) + command: update-ca-certificates + when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + +- name: Gen_certs | update ca-certificates (RedHat) + command: update-ca-trust extract + when: kube_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c2152814f..e6015560a 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -1,8 +1,12 @@ +--- ## Required for bootstrap-os/preinstall/download roles and setting facts # Valid bootstrap options (required): ubuntu, coreos, centos, none bootstrap_os: none kube_api_anonymous_auth: false +# Default value, but will be set to true automatically if detected +is_atomic: false + ## Change this to use another Kubernetes version, e.g. a current beta release kube_version: v1.6.7 @@ -65,9 +69,6 @@ kube_users: kube: pass: "{{kube_api_pwd}}" role: admin - root: - pass: "{{kube_api_pwd}}" - role: admin # Choose network plugin (calico, weave or flannel) # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing @@ -88,8 +89,11 @@ kube_network_node_prefix: 24 # The port the API Server will be listening on. kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" -kube_apiserver_port: 6443 # (https) -kube_apiserver_insecure_port: 8080 # (http) +# https +kube_apiserver_port: 6443 +# http +kube_apiserver_insecure_bind_address: 127.0.0.1 +kube_apiserver_insecure_port: 8080 # Path used to store Docker data docker_daemon_graph: "/var/lib/docker" @@ -128,3 +132,14 @@ openstack_lbaas_monitor_max_retries: false ## 'RBAC' modes are tested. 
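`upd_ca_trust.yml` above is the CA-trust logic extracted from `gen_certs_script.yml`, so it now also runs for vault-managed certs; it drops the cluster CA into the distro-specific anchor directory and refreshes the trust store. The manual equivalent, as a sketch (cert path assumes the default `kube_cert_dir`):

```sh
# Debian/Ubuntu (Container Linux uses /etc/ssl/certs/kube-ca.pem instead):
cp /etc/kubernetes/ssl/ca.pem /usr/local/share/ca-certificates/kube-ca.crt
update-ca-certificates
# RHEL/CentOS/Fedora:
cp /etc/kubernetes/ssl/ca.pem /etc/pki/ca-trust/source/anchors/kube-ca.crt
update-ca-trust extract
```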
authorization_modes: []
 rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
+
+## List of key=value pairs that describe feature gates for
+## the k8s cluster.
+kube_feature_gates: []
+
+# Vault data dirs.
+vault_base_dir: /etc/vault
+vault_cert_dir: "{{ vault_base_dir }}/ssl"
+vault_config_dir: "{{ vault_base_dir }}/config"
+vault_roles_dir: "{{ vault_base_dir }}/roles"
+vault_secrets_dir: "{{ vault_base_dir }}/secrets"
diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml
index 5b2cb96a0..11b9e3653 100644
--- a/roles/kubespray-defaults/tasks/main.yaml
+++ b/roles/kubespray-defaults/tasks/main.yaml
@@ -1,3 +1,4 @@
+---
 - name: Configure defaults
   debug:
     msg: "Check roles/kubespray-defaults/defaults/main.yml"
diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml
index e09ab3e1e..148a28082 100644
--- a/roles/network_plugin/calico/defaults/main.yml
+++ b/roles/network_plugin/calico/defaults/main.yml
@@ -34,3 +34,8 @@ calicoctl_cpu_requests: 50m
 # Should calico ignore kernel's RPF check setting,
 # see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198
 calico_node_ignorelooserpf: false
+
+rbac_resources:
+  - sa
+  - clusterrole
+  - clusterrolebinding
diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml
index 78dad7505..05cc73289 100644
--- a/roles/network_plugin/calico/handlers/main.yml
+++ b/roles/network_plugin/calico/handlers/main.yml
@@ -5,7 +5,7 @@
     - Calico | reload systemd
     - Calico | reload calico-node
 
-- name : Calico | reload systemd
+- name: Calico | reload systemd
   shell: systemctl daemon-reload
 
 - name: Calico | reload calico-node
diff --git a/roles/network_plugin/calico/rr/handlers/main.yml b/roles/network_plugin/calico/rr/handlers/main.yml
index efd0e12ac..cb166bda1 100644
--- a/roles/network_plugin/calico/rr/handlers/main.yml
+++ b/roles/network_plugin/calico/rr/handlers/main.yml
@@ -5,7 +5,7 @@
     - Calico-rr | reload systemd
     - Calico-rr | reload calico-rr
 
-- name : Calico-rr | reload systemd
+- name: Calico-rr | reload systemd
   shell: systemctl daemon-reload
 
 - name: Calico-rr | reload calico-rr
diff --git a/roles/network_plugin/calico/rr/meta/main.yml b/roles/network_plugin/calico/rr/meta/main.yml
index 55104953e..511b89744 100644
--- a/roles/network_plugin/calico/rr/meta/main.yml
+++ b/roles/network_plugin/calico/rr/meta/main.yml
@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: etcd
   - role: docker
diff --git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml
index 38d3ad5db..7ea77d053 100644
--- a/roles/network_plugin/calico/tasks/main.yml
+++ b/roles/network_plugin/calico/tasks/main.yml
@@ -1,9 +1,10 @@
 ---
-- name: Calico | Check calicoctl version
-  run_once: true
-  set_fact:
-    legacy_calicoctl: "{{ calicoctl_image_tag | version_compare('v1.0.0', '<') }}"
-  tags: facts
+- name: Calico | Disable calico-node service if it exists
+  service:
+    name: calico-node
+    state: stopped
+    enabled: no
+  failed_when: false
 
 - name: Calico | Write Calico cni config
   template:
@@ -38,7 +39,6 @@
     owner: root
     group: root
   changed_when: false
-  notify: restart calico-node
 
 - name: Calico | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -ac /opt/cni/bin/ /cnibindir/"
@@ -103,38 +103,7 @@
   environment:
     NO_DEFAULT_POOLS: true
   run_once:
true - when: not legacy_calicoctl and - ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) - -- name: Calico (old) | Define ipip pool argument - run_once: true - set_fact: - ipip_arg: "--ipip" - when: (legacy_calicoctl and ipip ) - tags: facts - -- name: Calico (old) | Define nat-outgoing pool argument - run_once: true - set_fact: - nat_arg: "--nat-outgoing" - when: (legacy_calicoctl and - nat_outgoing|default(false) and not peer_with_router|default(false)) - tags: facts - -- name: Calico (old) | Define calico pool task name - run_once: true - set_fact: - pool_task_name: "with options {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" - when: (legacy_calicoctl and ipip_arg|default(false) or nat_arg|default(false)) - tags: facts - -- name: Calico (old) | Configure calico network pool {{ pool_task_name|default('') }} - command: "{{ bin_dir}}/calicoctl pool add {{ kube_pods_subnet }} {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" - environment: - NO_DEFAULT_POOLS: true - run_once: true - when: legacy_calicoctl and - ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) + when: ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) - name: Calico | Get calico configuration from etcd command: |- @@ -162,52 +131,11 @@ - name: Calico | Set global as_num command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}" run_once: true - when: not legacy_calicoctl - -- name: Calico (old) | Set global as_num - command: "{{ bin_dir}}/calicoctl bgp default-node-as {{ global_as_num }}" - run_once: true - when: legacy_calicoctl - -- name: Calico (old) | Write calico-node systemd init file - template: - src: calico-node.service.legacy.j2 - dest: /etc/systemd/system/calico-node.service - when: legacy_calicoctl - notify: restart calico-node - -- name: Calico | Write calico.env for systemd init file - template: - src: calico.env.j2 - dest: /etc/calico/calico.env - when: not legacy_calicoctl - notify: restart calico-node - -- name: Calico | Write calico-node systemd init file - template: - src: calico-node.service.j2 - dest: /etc/systemd/system/calico-node.service - when: not legacy_calicoctl - notify: restart calico-node - -- name: Calico | Restart calico-node if secrets changed - command: /bin/true - when: secret_changed|default(false) or etcd_secret_changed|default(false) - notify: restart calico-node - -- meta: flush_handlers - -- name: Calico | Enable calico-node - service: - name: calico-node - state: started - enabled: yes - name: Calico | Disable node mesh shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off" when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false)) - and inventory_hostname in groups['k8s-cluster'] - and not legacy_calicoctl) + and inventory_hostname in groups['k8s-cluster']) run_once: true - name: Calico | Configure peering with router(s) @@ -220,8 +148,7 @@ }' | {{ bin_dir }}/calicoctl create --skip-exists -f - with_items: "{{ peers|default([]) }}" - when: (not legacy_calicoctl and - peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']) + when: peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'] - name: Calico | Configure peering with route reflectors shell: > @@ -235,26 +162,20 @@ }' | {{ bin_dir }}/calicoctl create --skip-exists -f - with_items: "{{ groups['calico-rr'] | default([]) }}" - when: (not legacy_calicoctl and - peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] + 
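With the pre-1.0 `calicoctl` code paths removed above, only the v1-style commands remain, and pool/BGP settings become one-time `calicoctl config` writes instead of per-service flags. For reference, the two global settings the remaining tasks manage (AS number value hypothetical):

```sh
calicoctl config set asNumber 64512       # global_as_num
calicoctl config set nodeToNodeMesh off   # required when peering via routers/RRs
```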
when: (peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] and hostvars[item]['cluster_id'] == cluster_id) -- name: Calico (old) | Disable node mesh - shell: "{{ bin_dir }}/calicoctl bgp node-mesh off" - when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false)) - and inventory_hostname in groups['k8s-cluster'] - and legacy_calicoctl) - run_once: true - -- name: Calico (old) | Configure peering with router(s) - shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}" - with_items: "{{ peers|default([]) }}" - when: (legacy_calicoctl and - peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']) - -- name: Calico (old) | Configure peering with route reflectors - shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip'])|default(hostvars[item]['ansible_default_ipv4.address']) }} as {{ local_as | default(global_as_num) }}" - with_items: "{{ groups['calico-rr'] | default([]) }}" - when: (legacy_calicoctl and - peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] - and hostvars[item]['cluster_id'] == cluster_id) +- name: Calico | Create calico manifests + template: + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-config, file: calico-config.yml, type: cm} + - {name: calico-node, file: calico-node.yml, type: ds} + - {name: calico, file: calico-node-sa.yml, type: sa} + - {name: calico, file: calico-cr.yml, type: clusterrole} + - {name: calico, file: calico-crb.yml, type: clusterrolebinding} + register: calico_node_manifests + when: + - inventory_hostname in groups['kube-master'] + - rbac_enabled or item.type not in rbac_resources diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 new file mode 100644 index 000000000..a4207f1dc --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -0,0 +1,19 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: {{ system_namespace }} +data: + etcd_endpoints: "{{ etcd_access_endpoint }}" + etcd_ca: "/calico-secrets/ca_cert.crt" + etcd_cert: "/calico-secrets/cert.crt" + etcd_key: "/calico-secrets/key.pem" +{% if calico_network_backend is defined and calico_network_backend == 'none' %} + cluster_type: "kubespray" +{%- else %} + cluster_type: "kubespray,bgp" +{% endif %} + calico_backend: "bird" + {%- if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false) %} + as: "{{ local_as }}" + {% endif -%} diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 new file mode 100644 index 000000000..47d626659 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 new file mode 100644 index 000000000..2e132a0dc --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: 
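The manifest list above tags each template with a `type`, so that `when: rbac_enabled or item.type not in rbac_resources` always lays down the ConfigMap and DaemonSet but renders the ServiceAccount/ClusterRole/ClusterRoleBinding trio only on RBAC clusters. Once applied, the binding can be sanity-checked with impersonation; a sketch, assuming `system_namespace` is `kube-system` and a reasonably recent kubectl:

```sh
kubectl auth can-i get pods \
  --as=system:serviceaccount:kube-system:calico-node
# expected output: yes (granted by the calico-node ClusterRole above)
```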
rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 new file mode 100644 index 000000000..5cce29793 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/calico/templates/calico-node.service.j2 b/roles/network_plugin/calico/templates/calico-node.service.j2 deleted file mode 100644 index 015c91b08..000000000 --- a/roles/network_plugin/calico/templates/calico-node.service.j2 +++ /dev/null @@ -1,40 +0,0 @@ -[Unit] -Description=calico-node -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=-{{ docker_bin_dir }}/docker rm -f calico-node -ExecStart={{ docker_bin_dir }}/docker run --net=host --privileged \ - --name=calico-node \ - -e HOSTNAME=${CALICO_HOSTNAME} \ - -e IP=${CALICO_IP} \ - -e IP6=${CALICO_IP6} \ - -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \ - -e FELIX_DEFAULTENDPOINTTOHOSTACTION={{ calico_endpoint_to_host_action|default('RETURN') }} \ - -e AS=${CALICO_AS} \ - -e NO_DEFAULT_POOLS=${CALICO_NO_DEFAULT_POOLS} \ - -e CALICO_LIBNETWORK_ENABLED=${CALICO_LIBNETWORK_ENABLED} \ - -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \ - -e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \ - -e ETCD_CERT_FILE=${ETCD_CERT_FILE} \ - -e ETCD_KEY_FILE=${ETCD_KEY_FILE} \ -{% if calico_node_ignorelooserpf %} - -e FELIX_IGNORELOOSERPF=true \ -{% endif %} - -v /var/log/calico:/var/log/calico \ - -v /run/docker/plugins:/run/docker/plugins \ - -v /lib/modules:/lib/modules \ - -v /var/run/calico:/var/run/calico \ - -v {{ calico_cert_dir }}:{{ calico_cert_dir }}:ro \ - --memory={{ calico_node_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ calico_node_cpu_limit|regex_replace('m', '') }} \ - {{ calico_node_image_repo }}:{{ calico_node_image_tag }} - -Restart=always -RestartSec=10s - -ExecStop=-{{ docker_bin_dir }}/docker stop calico-node - -[Install] -WantedBy=multi-user.target diff --git a/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 b/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 deleted file mode 100644 index f542f64f6..000000000 --- a/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 +++ /dev/null @@ -1,19 +0,0 @@ -[Unit] -Description=Calico per-node agent -Documentation=https://github.com/projectcalico/calico-docker -After=docker.service docker.socket -Wants=docker.socket - -[Service] -User=root -PermissionsStartOnly=true -{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%} -ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --as={{ local_as }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }} -{% else %} -ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }} -{% endif %} -Restart=always -RestartSec=10s - -[Install] -WantedBy=multi-user.target diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 new file 
mode 100644 index 000000000..9f47d468a --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -0,0 +1,166 @@ +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: {{ system_namespace }} + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + spec: + hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: calico-node +{% endif %} + tolerations: + - effect: NoSchedule + operator: Exists + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + valueFrom: + configMapKeyRef: + name: calico-config + key: cluster_type + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ calico_endpoint_to_host_action|default('RETURN') }}" +# should be set in etcd before deployment +# # Configure the IP Pool from which Pod IPs will be chosen. +# - name: CALICO_IPV4POOL_CIDR +# value: "192.168.0.0/16" +# - name: CALICO_IPV4POOL_IPIP +# value: "always" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Disable autocreation of pools + - name: CALICO_NO_DEFAULT_POOLS + value: "true" + # Enable libnetwork + - name: CALICO_LIBNETWORK_ENABLED + value: "true" + # Set MTU for tunnel device used if ipip is enabled +{% if calico_mtu is defined %} + - name: FELIX_IPINIPMTU + value: "{{ calico_mtu }}" +{% endif %} + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. 
+ - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + - name: IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + livenessProbe: + httpGet: + path: /liveness + port: 9099 + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets. + - name: etcd-certs + hostPath: + path: "{{ calico_cert_dir }}" + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + diff --git a/roles/network_plugin/calico/templates/calico.env.j2 b/roles/network_plugin/calico/templates/calico.env.j2 deleted file mode 100644 index 83cf8f291..000000000 --- a/roles/network_plugin/calico/templates/calico.env.j2 +++ /dev/null @@ -1,15 +0,0 @@ -ETCD_ENDPOINTS="{{ etcd_access_endpoint }}" -ETCD_CA_CERT_FILE="{{ calico_cert_dir }}/ca_cert.crt" -ETCD_CERT_FILE="{{ calico_cert_dir }}/cert.crt" -ETCD_KEY_FILE="{{ calico_cert_dir }}/key.pem" -CALICO_IP="{{ip | default(ansible_default_ipv4.address) }}" -CALICO_IP6="" -{% if calico_network_backend is defined %} -CALICO_NETWORKING_BACKEND="{{calico_network_backend }}" -{% endif %} -{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%} -CALICO_AS="{{ local_as }}" -{% endif %} -CALICO_NO_DEFAULT_POOLS="true" -CALICO_LIBNETWORK_ENABLED="true" -CALICO_HOSTNAME="{{ ansible_hostname }}" diff --git a/roles/network_plugin/calico/templates/cni-calico.conf.j2 b/roles/network_plugin/calico/templates/cni-calico.conf.j2 index 7cd3c902d..f49682ea9 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conf.j2 +++ b/roles/network_plugin/calico/templates/cni-calico.conf.j2 @@ -1,7 +1,9 @@ { "name": "calico-k8s-network", -{% if not legacy_calicoctl %} - "hostname": "{{ ansible_hostname }}", +{% if cloud_provider is defined %} + "nodename": "{{ inventory_hostname }}", +{% else %} + "nodename": "{{ ansible_hostname }}", {% endif %} "type": "calico", "etcd_endpoints": "{{ etcd_access_endpoint }}", diff --git a/roles/network_plugin/canal/defaults/main.yml b/roles/network_plugin/canal/defaults/main.yml index d4018db4d..bf74653c7 100644 --- a/roles/network_plugin/canal/defaults/main.yml +++ b/roles/network_plugin/canal/defaults/main.yml @@ -1,3 +1,4 @@ +--- # The interface used by canal for host <-> host communication. # If left blank, then the interface is chosing using the node's # default route. 
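Unlike the systemd unit it replaces, the calico-node DaemonSet above sets an explicit `updateStrategy`; without it, `extensions/v1beta1` DaemonSets default to `OnDelete`, and template changes (including the new cert-serial annotation) would only take effect as pods are deleted by hand. With `RollingUpdate` and `maxUnavailable: 1` the rollout proceeds node by node and can be watched (sketch; `rollout status` for DaemonSets needs kubectl 1.7+):

```sh
kubectl -n kube-system get ds calico-node
kubectl -n kube-system rollout status ds/calico-node
```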
@@ -31,3 +32,7 @@ calicoctl_cpu_limit: 100m calicoctl_memory_requests: 32M calicoctl_cpu_requests: 25m +rbac_resources: + - sa + - clusterrole + - clusterrolebinding diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index ea67e20cd..2cc1a8ffe 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -32,16 +32,22 @@ delegate_to: "{{groups['etcd'][0]}}" run_once: true -- name: Canal | Write canal configmap +- name: Canal | Create canal node manifests template: - src: canal-config.yml.j2 - dest: "{{kube_config_dir}}/canal-config.yaml" - -- name: Canal | Write canal node configuration - template: - src: canal-node.yml.j2 - dest: "{{kube_config_dir}}/canal-node.yaml" - register: canal_node_manifest + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: canal-config, file: canal-config.yaml, type: cm} + - {name: canal-node, file: canal-node.yaml, type: ds} + - {name: canal, file: canal-node-sa.yml, type: sa} + - {name: calico, file: canal-cr-calico.yml, type: clusterrole} + - {name: flannel, file: canal-cr-flannel.yml, type: clusterrole} + - {name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding} + - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding} + register: canal_manifests + when: + - inventory_hostname in groups['kube-master'] + - rbac_enabled or item.type not in rbac_resources - name: Canal | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -ac /opt/cni/bin/ /cnibindir/" diff --git a/roles/network_plugin/canal/templates/canal-config.yml.j2 b/roles/network_plugin/canal/templates/canal-config.yaml.j2 similarity index 100% rename from roles/network_plugin/canal/templates/canal-config.yml.j2 rename to roles/network_plugin/canal/templates/canal-config.yaml.j2 diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 new file mode 100644 index 000000000..e3b048c64 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 @@ -0,0 +1,80 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - pods/status + verbs: + - update + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - update + - watch + - apiGroups: ["extensions"] + resources: + - thirdpartyresources + verbs: + - create + - get + - list + - watch + - apiGroups: ["extensions"] + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: ["projectcalico.org"] + resources: + - globalbgppeers + verbs: + - get + - list + - apiGroups: ["projectcalico.org"] + resources: + - globalconfigs + - globalbgpconfigs + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["projectcalico.org"] + resources: + - ippools + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["alpha.projectcalico.org"] + resources: + - systemnetworkpolicies + verbs: + - get + - list diff --git a/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 
new file mode 100644 index 000000000..0be8e938c --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 @@ -0,0 +1,26 @@ +--- +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 new file mode 100644 index 000000000..e1c1f5050 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the calico ClusterRole to the canal ServiceAccount. +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico +subjects: +- kind: ServiceAccount + name: canal + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 new file mode 100644 index 000000000..3b00017b1 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 new file mode 100644 index 000000000..d5b9a6e97 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" + diff --git a/roles/network_plugin/canal/templates/canal-node.yml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 similarity index 92% rename from roles/network_plugin/canal/templates/canal-node.yml.j2 rename to roles/network_plugin/canal/templates/canal-node.yaml.j2 index 37baf06e0..972b02d5f 100644 --- a/roles/network_plugin/canal/templates/canal-node.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -3,6 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: canal-node + namespace: {{ system_namespace }} labels: k8s-app: canal-node spec: @@ -18,6 +19,12 @@ spec: k8s-app: canal-node spec: hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: canal +{% endif %} + tolerations: + - effect: NoSchedule + operator: Exists volumes: # Used by calico/node. - name: lib-modules @@ -143,6 +150,9 @@ spec: # Disable Calico BGP. Calico is simply enforcing policy. - name: CALICO_NETWORKING value: "false" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "kubespray,canal" # Disable file logging so `kubectl logs` works. 
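Both the calico-node and canal-node DaemonSets now carry a blanket toleration. This matters because the kubelet change earlier in this diff taints masters with `node-role.kubernetes.io/master=:NoSchedule`; without the toleration the CNI pods would never be scheduled there:

```yaml
tolerations:
  - effect: NoSchedule
    operator: Exists   # tolerates every NoSchedule taint, incl. the master taint
```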
- name: CALICO_DISABLE_FILE_LOGGING value: "true" @@ -162,6 +172,10 @@ spec: configMapKeyRef: name: canal-config key: etcd_keyfile + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName securityContext: privileged: true volumeMounts: @@ -174,3 +188,7 @@ spec: - name: "canal-certs" mountPath: "{{ canal_cert_dir }}" readOnly: true + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/roles/network_plugin/cloud/tasks/main.yml b/roles/network_plugin/cloud/tasks/main.yml index 36fa8e57d..7b6650372 100644 --- a/roles/network_plugin/cloud/tasks/main.yml +++ b/roles/network_plugin/cloud/tasks/main.yml @@ -14,4 +14,3 @@ owner: kube recurse: true mode: "u=rwX,g-rwx,o-rwx" - diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index 360756f0a..08f4ac145 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -1,7 +1,8 @@ --- # Flannel public IP # The address that flannel should advertise as how to access the system -flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" ## interface that should be used for flannel operations ## This is actually an inventory node-level item @@ -17,5 +18,5 @@ flannel_cpu_limit: 300m flannel_memory_requests: 64M flannel_cpu_requests: 150m -flannel_cert_dir: /etc/flannel/certs -etcd_cert_dir: /etc/ssl/etcd/ssl +# Legacy directory, will be removed if found. +flannel_cert_dir: /etc/flannel/certs \ No newline at end of file diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index bd4058976..00c5667b0 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -4,6 +4,10 @@ failed_when: false notify: Flannel | restart docker +- name: Flannel | delete flannel interface + command: ip link delete flannel.1 + failed_when: false + # special cases for atomic because it defaults to live-restore: true # So we disable live-restore to pickup the new flannel IP. 
After # we enable it, we have to restart docker again to pickup the new @@ -18,7 +22,7 @@ - Flannel | pause while Docker restarts - Flannel | wait for docker -- name : Flannel | reload systemd +- name: Flannel | reload systemd shell: systemctl daemon-reload - name: Flannel | reload docker.socket diff --git a/roles/network_plugin/flannel/meta/main.yml b/roles/network_plugin/flannel/meta/main.yml index 4a2caef72..791209357 100644 --- a/roles/network_plugin/flannel/meta/main.yml +++ b/roles/network_plugin/flannel/meta/main.yml @@ -3,3 +3,6 @@ dependencies: - role: download file: "{{ downloads.flannel }}" tags: download + - role: download + file: "{{ downloads.flannel_cni }}" + tags: download diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 573b51f19..7da3bfaa4 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -1,83 +1,16 @@ --- -- name: Flannel | Set Flannel etcd configuration - command: |- - {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \ - set /{{ cluster_name }}/network/config \ - '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }' - delegate_to: "{{groups['etcd'][0]}}" - run_once: true +- include: pre-upgrade.yml -- name: Flannel | Create flannel certs directory - file: - dest: "{{ flannel_cert_dir }}" - state: directory - mode: 0750 - owner: root - group: root - -- name: Flannel | Link etcd certificates for flanneld - file: - src: "{{ etcd_cert_dir }}/{{ item.s }}" - dest: "{{ flannel_cert_dir }}/{{ item.d }}" - state: hard - force: yes - with_items: - - {s: "ca.pem", d: "ca_cert.crt"} - - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"} - - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"} - -- name: Flannel | Create flannel pod manifest +- name: Flannel | Create cni-flannel-rbac manifest template: - src: flannel-pod.yml - dest: "{{kube_manifest_dir}}/flannel-pod.manifest" - notify: Flannel | delete default docker bridge + src: cni-flannel-rbac.yml.j2 + dest: "{{ kube_config_dir }}/cni-flannel-rbac.yml" + register: flannel_rbac_manifest + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled -- name: Flannel | Wait for flannel subnet.env file presence - wait_for: - path: /run/flannel/subnet.env - delay: 5 - timeout: 600 - -- name: Flannel | Get flannel_subnet from subnet.env - shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}' - register: flannel_subnet_output - changed_when: false - check_mode: no - -- set_fact: - flannel_subnet: "{{ flannel_subnet_output.stdout }}" - -- name: Flannel | Get flannel_mtu from subnet.env - shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}' - register: flannel_mtu_output - changed_when: false - check_mode: no - -- set_fact: - flannel_mtu: "{{ flannel_mtu_output.stdout }}" - -- set_fact: - docker_options_file: >- - {%- if ansible_os_family == "Debian" -%}/etc/default/docker{%- elif ansible_os_family == "RedHat" -%}/etc/sysconfig/docker{%- endif -%} - tags: facts - -- set_fact: - docker_options_name: >- - {%- if ansible_os_family == "Debian" -%}DOCKER_OPTS{%- elif ansible_os_family == "RedHat" -%}other_args{%- endif -%} - tags: facts - -- set_fact: - docker_network_options: '"--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"' - -- name: Flannel | Ensure path for docker network systemd drop-in - file: - path: "/etc/systemd/system/docker.service.d" - 
state: directory - owner: root - -- name: Flannel | Create docker network systemd drop-in +- name: Flannel | Create cni-flannel manifest template: - src: flannel-options.conf.j2 - dest: "/etc/systemd/system/docker.service.d/flannel-options.conf" - notify: - - Flannel | restart docker + src: cni-flannel.yml.j2 + dest: "{{ kube_config_dir }}/cni-flannel.yml" + register: flannel_manifest + when: inventory_hostname == groups['kube-master'][0] \ No newline at end of file diff --git a/roles/network_plugin/flannel/tasks/pre-upgrade.yml b/roles/network_plugin/flannel/tasks/pre-upgrade.yml new file mode 100644 index 000000000..6b6fcd54f --- /dev/null +++ b/roles/network_plugin/flannel/tasks/pre-upgrade.yml @@ -0,0 +1,19 @@ +--- +- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file + file: + path: "/etc/systemd/system/docker.service.d/flannel-options.conf" + state: absent + notify: + - Flannel | delete default docker bridge + +- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest + file: + path: "{{ kube_manifest_dir }}/flannel-pod.manifest" + state: absent + notify: + - Flannel | delete flannel interface + +- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI + file: + dest: "{{ flannel_cert_dir }}" + state: absent \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 new file mode 100644 index 000000000..aafe2a0f5 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + namespace: "{{system_namespace}}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: "{{system_namespace}}" \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 new file mode 100644 index 000000000..0012228d7 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -0,0 +1,125 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: "{{system_namespace}}" + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name":"cbr0", + "cniVersion":"0.3.1", + "plugins":[ + { + "type":"flannel", + "delegate":{ + "forceAddress":true, + "isDefaultGateway":true + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "Backend": { + "Type": "{{ flannel_backend_type }}" + } + } +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel + namespace: "{{system_namespace}}" + labels: + tier: node + k8s-app: flannel +spec: + template: + metadata: + labels: + tier: node + k8s-app: flannel + spec: +{% if rbac_enabled %} + serviceAccountName: flannel +{% endif %} + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + 
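# (For reference, with stock kubespray values, illustrative only, e.g.
#  kube_pods_subnet=10.233.64.0/18 and flannel_backend_type=vxlan, the
#  net-conf.json in the ConfigMap above renders to
#    { "Network": "10.233.64.0/18", "Backend": { "Type": "vxlan" } }
#  which is what flanneld consumes when started with --kube-subnet-mgr.)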
imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: install-cni + image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-flannel-cfg + key: cni-conf.json + - name: CNI_CONF_NAME + value: "10-flannel.conflist" + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/flannel-options.conf.j2 b/roles/network_plugin/flannel/templates/flannel-options.conf.j2 deleted file mode 100644 index 9ee22b4bc..000000000 --- a/roles/network_plugin/flannel/templates/flannel-options.conf.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[Service] -{% if ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] %} -Environment="DOCKER_OPT_BIP=--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}" -{% else %} -Environment="DOCKER_NETWORK_OPTIONS=--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}" -{% endif %} diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml deleted file mode 100644 index 92ecada69..000000000 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- - kind: "Pod" - apiVersion: "v1" - metadata: - name: "flannel" - namespace: "{{system_namespace}}" - labels: - app: "flannel" - version: "v0.1" - spec: - volumes: - - name: "subnetenv" - hostPath: - path: "/run/flannel" - - name: "etcd-certs" - hostPath: - path: "{{ flannel_cert_dir }}" - containers: - - name: "flannel-container" - image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" - imagePullPolicy: {{ k8s_image_pull_policy }} - resources: - limits: - cpu: {{ flannel_cpu_limit }} - memory: {{ flannel_memory_limit }} - requests: - cpu: {{ flannel_cpu_requests }} - memory: {{ flannel_memory_requests }} - command: - - "/bin/sh" - - "-c" - - "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ flannel_cert_dir }}/ca_cert.crt -etcd-certfile {{ flannel_cert_dir }}/cert.crt -etcd-keyfile {{ flannel_cert_dir }}/key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}" - ports: - - hostPort: 10253 - 
containerPort: 10253 - volumeMounts: - - name: "subnetenv" - mountPath: "/run/flannel" - - name: "etcd-certs" - mountPath: "{{ flannel_cert_dir }}" - readOnly: true - securityContext: - privileged: true - hostNetwork: true diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index a1c970efe..d9834a3cd 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -1,16 +1,16 @@ --- dependencies: - - role: network_plugin/calico - when: kube_network_plugin == 'calico' - tags: calico - - role: network_plugin/flannel - when: kube_network_plugin == 'flannel' - tags: flannel - - role: network_plugin/weave - when: kube_network_plugin == 'weave' - tags: weave - - role: network_plugin/canal - when: kube_network_plugin == 'canal' - tags: canal - - role: network_plugin/cloud - when: kube_network_plugin == 'cloud' + - role: network_plugin/calico + when: kube_network_plugin == 'calico' + tags: calico + - role: network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: flannel + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: weave + - role: network_plugin/canal + when: kube_network_plugin == 'canal' + tags: canal + - role: network_plugin/cloud + when: kube_network_plugin == 'cloud' diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index 813bbfafe..462278e94 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -1,14 +1,7 @@ --- -- include: pre-upgrade.yml - - include: seed.yml when: weave_mode_seed -- name: Weave | enable br_netfilter module - modprobe: - name: br_netfilter - state: present - - name: Weave | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. 
/cnibindir/" register: cni_task_result diff --git a/roles/network_plugin/weave/tasks/pre-upgrade.yml b/roles/network_plugin/weave/tasks/pre-upgrade.yml deleted file mode 100644 index 0b10a7551..000000000 --- a/roles/network_plugin/weave/tasks/pre-upgrade.yml +++ /dev/null @@ -1,41 +0,0 @@ -- name: Weave pre-upgrade | Stop legacy weave - command: weave stop - failed_when: false - -- name: Weave pre-upgrade | Stop legacy systemd weave services - service: - name: "{{ item }}" - enabled: no - state: stopped - with_items: - - weaveexpose - - weaveproxy - - weave - failed_when: false - -- name: Weave pre-upgrade | Purge legacy systemd weave systemd unit files - file: - path: "{{ item }}" - state: absent - register: purged_weave_systemd_units - with_items: - - "/etc/systemd/system/weaveexpose.service" - - "/etc/systemd/system/weaveproxy.service" - - "/etc/systemd/system/weave.service" - -- name: Weave pre-upgrade | Reload systemd - command: systemctl daemon-reload - when: ansible_service_mgr == "systemd" and purged_weave_systemd_units.changed - -- name: Weave pre-upgrade | Purge legacy weave configs and binary - file: - path: "{{ item }}" - state: absent - with_items: - - "{{ bin_dir }}/weave" - - "/etc/weave.env" - -- name: Weave pre-upgrade | Purge legacy weave docker containers - shell: "docker ps -af 'name=^/weave.*' -q | xargs --no-run-if-empty docker rm -f" - retries: 3 - failed_when: false diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index ba1f07929..c61f2e7e4 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -153,4 +153,9 @@ items: path: /var/lib/dbus - name: lib-modules hostPath: - path: /lib/modules \ No newline at end of file + path: /lib/modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index e13065404..624e7135e 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -17,6 +17,8 @@ with_items: - kubelet - etcd + - vault + - calico-node register: services_removed tags: ['services'] @@ -36,6 +38,10 @@ - name: reset | remove all containers shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" + register: remove_all_containers + retries: 4 + until: remove_all_containers.rc == 0 + delay: 5 tags: ['docker'] - name: reset | restart docker if needed @@ -86,10 +92,15 @@ - /run/flannel - /etc/flannel - /run/kubernetes - - /usr/local/share/ca-certificates/kube-ca.crt - /usr/local/share/ca-certificates/etcd-ca.crt - - /etc/ssl/certs/kube-ca.pem + - /usr/local/share/ca-certificates/kube-ca.crt + - /usr/local/share/ca-certificates/vault-ca.crt - /etc/ssl/certs/etcd-ca.pem + - /etc/ssl/certs/kube-ca.pem + - /etc/ssl/certs/vault-ca.crt + - /etc/pki/ca-trust/source/anchors/etcd-ca.crt + - /etc/pki/ca-trust/source/anchors/kube-ca.crt + - /etc/pki/ca-trust/source/anchors/vault-ca.crt - /etc/vault - /var/log/pods/ - "{{ bin_dir }}/kubelet" diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml index 76719eebb..0cc8f8898 100644 --- a/roles/rkt/tasks/install.yml +++ b/roles/rkt/tasks/install.yml @@ -3,14 +3,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ 
ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index e7efa0601..ec6fdcf90 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,7 +1,5 @@ --- - - name: Uncordon node command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} ) - diff --git a/roles/upgrade/pre-upgrade/defaults/main.yml b/roles/upgrade/pre-upgrade/defaults/main.yml index c87b7e9ea..89334f87c 100644 --- a/roles/upgrade/pre-upgrade/defaults/main.yml +++ b/roles/upgrade/pre-upgrade/defaults/main.yml @@ -1,3 +1,3 @@ +--- drain_grace_period: 90 drain_timeout: 120s - diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index decc9d05b..e4dbe569b 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: See if node is in ready state - shell: "kubectl get nodes | grep {{ inventory_hostname }}" + shell: "{{ bin_dir }}/kubectl get nodes | grep {{ inventory_hostname }}" register: kubectl_nodes delegate_to: "{{ groups['kube-master'][0] }}" failed_when: false diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml deleted file mode 100644 index 303a2d050..000000000 --- a/roles/uploads/defaults/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -local_release_dir: /tmp - -# Versions -etcd_version: v3.0.17 -calico_version: v0.23.0 -calico_cni_version: v1.5.6 -weave_version: v2.0.1 - -# Download URL's -etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz" -calico_cni_download_url: "https://github.com/projectcalico/calico-cni/releases/download/{{calico_cni_version}}/calico" -calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/releases/download/{{calico_cni_version}}/calico-ipam" -weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave" - -# Checksums -calico_cni_checksum: "9a6bd6da267c498a1833117777c069f44f720d23226d8459bada2a0b41cb8258" -calico_cni_ipam_checksum: "8d3574736df1ce10ea88fdec94d84dc58642081d3774d2d48249c6ee94ed316d" -weave_checksum: "ee22e690985115a5986352b2c75589674349c618a5c95893f87600a13e2d58e9" -etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485" - -downloads: - - name: calico-cni-plugin - dest: calico/bin/calico - version: "{{calico_cni_version}}" - sha256: "{{ calico_cni_checksum }}" - source_url: "{{ calico_cni_download_url }}" - url: "{{ calico_cni_download_url }}" - owner: "root" - mode: "0755" - - - name: calico-cni-plugin-ipam - dest: calico/bin/calico-ipam - version: "{{calico_cni_version}}" - sha256: "{{ 
calico_cni_ipam_checksum }}" - source_url: "{{ calico_cni_ipam_download_url }}" - url: "{{ calico_cni_ipam_download_url }}" - owner: "root" - mode: "0755" - - - name: weave - dest: weave/bin/weave - version: "{{weave_version}}" - source_url: "{{weave_download_url}}" - url: "{{weave_download_url}}" - sha256: "{{ weave_checksum }}" - owner: "root" - mode: "0755" - - - name: etcd - version: "{{etcd_version}}" - dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz" - sha256: "{{ etcd_checksum }}" - source_url: "{{ etcd_download_url }}" - url: "{{ etcd_download_url }}" - unarchive: true - owner: "etcd" - mode: "0755" diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml deleted file mode 100644 index a770020c2..000000000 --- a/roles/uploads/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Create dest directories - file: - path: "{{local_release_dir}}/{{item.dest|dirname}}" - state: directory - recurse: yes - with_items: '{{downloads}}' - -- name: Download items - get_url: - url: "{{item.source_url}}" - dest: "{{local_release_dir}}/{{item.dest}}" - sha256sum: "{{item.sha256 | default(omit)}}" - owner: "{{ item.owner|default(omit) }}" - mode: "{{ item.mode|default(omit) }}" - with_items: '{{downloads}}' - -- name: uploads items - gc_storage: - bucket: kargo - object: "{{item.version}}_{{item.name}}" - src: "{{ local_release_dir }}/{{item.dest}}" - mode: put - permission: public-read - gs_access_key: 'changeme' - gs_secret_key: 'changeme' - with_items: '{{downloads}}' diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 7e14374bf..8916d4b3a 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -1,4 +1,6 @@ --- +vault_bootstrap: false +vault_deployment_type: docker vault_adduser_vars: comment: "Hashicorp Vault User" @@ -6,40 +8,18 @@ vault_adduser_vars: name: vault shell: /sbin/nologin system: yes + +# This variables redefined in kubespray-defaults for using shared tasks +# in etcd and kubernetes/secrets roles vault_base_dir: /etc/vault -# https://releases.hashicorp.com/vault/0.6.4/vault_0.6.4_SHA256SUMS -vault_binary_checksum: 04d87dd553aed59f3fe316222217a8d8777f40115a115dac4d88fac1611c51a6 -vault_bootstrap: false -vault_ca_options: - common_name: kube-cluster-ca - format: pem - ttl: 87600h vault_cert_dir: "{{ vault_base_dir }}/ssl" -vault_client_headers: - Accept: "application/json" - Content-Type: "application/json" -vault_config: - backend: - etcd: - address: "{{ vault_etcd_url }}" - ha_enabled: "true" - redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}" - tls_ca_file: "{{ vault_cert_dir }}/ca.pem" - cluster_name: "kubernetes-vault" - default_lease_ttl: "{{ vault_default_lease_ttl }}" - listener: - tcp: - address: "0.0.0.0:{{ vault_port }}" - tls_cert_file: "{{ vault_cert_dir }}/api.pem" - tls_key_file: "{{ vault_cert_dir }}/api-key.pem" - max_lease_ttl: "{{ vault_max_lease_ttl }}" vault_config_dir: "{{ vault_base_dir }}/config" -vault_container_name: kube-hashicorp-vault -# This variable is meant to match the GID of vault inside Hashicorp's official Vault Container -vault_default_lease_ttl: 720h -vault_default_role_permissions: - allow_any_name: true -vault_deployment_type: docker +vault_roles_dir: "{{ vault_base_dir }}/roles" +vault_secrets_dir: "{{ vault_base_dir }}/secrets" +vault_log_dir: "/var/log/vault" + +vault_version: 0.8.1 +vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188 vault_download_url: "https://releases.hashicorp.com/vault/{{ 
vault_version }}/vault_{{ vault_version }}_linux_amd64.zip" vault_download_vars: container: "{{ vault_deployment_type != 'host' }}" @@ -54,29 +34,19 @@ vault_download_vars: unarchive: true url: "{{ vault_download_url }}" version: "{{ vault_version }}" -vault_etcd_url: "https://{{ hostvars[groups.etcd[0]]['ip']|d(hostvars[groups.etcd[0]]['ansible_default_ipv4']['address']) }}:2379" + +vault_container_name: kube-hashicorp-vault +vault_temp_container_name: vault-temp vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" -vault_log_dir: "/var/log/vault" -vault_max_lease_ttl: 87600h -vault_needs_gen: false + +vault_bind_address: 0.0.0.0 vault_port: 8200 -# Although "cert" is an option, ansible has no way to auth via cert until -# upstream merges: https://github.com/ansible/ansible/pull/18141 -vault_role_auth_method: userpass -vault_roles: - - name: etcd - group: etcd - policy_rules: default - role_options: default - - name: kube - group: k8s-cluster - policy_rules: default - role_options: default -vault_roles_dir: "{{ vault_base_dir }}/roles" -vault_secret_shares: 1 -vault_secret_threshold: 1 -vault_secrets_dir: "{{ vault_base_dir }}/secrets" +vault_etcd_url: "https://{{ hostvars[groups.etcd[0]]['ip']|d(hostvars[groups.etcd[0]]['ansible_default_ipv4']['address']) }}:2379" + +vault_default_lease_ttl: 720h +vault_max_lease_ttl: 87600h + vault_temp_config: backend: file: @@ -84,8 +54,109 @@ vault_temp_config: default_lease_ttl: "{{ vault_default_lease_ttl }}" listener: tcp: - address: "0.0.0.0:{{ vault_port }}" + address: "{{ vault_bind_address }}:{{ vault_port }}" tls_disable: "true" max_lease_ttl: "{{ vault_max_lease_ttl }}" -vault_temp_container_name: vault-temp -vault_version: 0.6.4 + +vault_config: + backend: + etcd: + address: "{{ vault_etcd_url }}" + ha_enabled: "true" + redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}" + tls_ca_file: "{{ vault_etcd_cert_dir }}/ca.pem" + cluster_name: "kubernetes-vault" + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + listener: + tcp: + address: "{{ vault_bind_address }}:{{ vault_port }}" + tls_cert_file: "{{ vault_cert_dir }}/api.pem" + tls_key_file: "{{ vault_cert_dir }}/api-key.pem" + +vault_secret_shares: 1 +vault_secret_threshold: 1 + +vault_ca_options: + vault: + common_name: vault + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + etcd: + common_name: etcd + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + kube: + common_name: kube + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + +vault_client_headers: + Accept: "application/json" + Content-Type: "application/json" + +vault_etcd_cert_dir: /etc/ssl/etcd/ssl +vault_kube_cert_dir: /etc/kubernetes/ssl + +vault_pki_mounts: + vault: + name: vault + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Vault Root CA" + cert_dir: "{{ vault_cert_dir }}" + roles: + - name: vault + group: vault + password: "{{ lookup('password', 'credentials/vault/vault length=15') }}" + policy_rules: default + role_options: default + etcd: + name: etcd + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Etcd Root CA" + cert_dir: "{{ vault_etcd_cert_dir }}" + roles: + - name: etcd + group: etcd + password: "{{ lookup('password', 'credentials/vault/etcd length=15') }}" + policy_rules: default + role_options: + allow_any_name: true + 
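# (These role_options are passed through verbatim to Vault's PKI role
#  endpoint, v1/<mount>/roles/<name>; "organization" sets the O= field of
#  issued certs, which Kubernetes maps to group names, e.g. "system:masters"
#  for the kube mount defined below.)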
enforce_hostnames: false + organization: "kube:etcd" + kube: + name: kube + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Kubernetes Root CA" + cert_dir: "{{ vault_kube_cert_dir }}" + roles: + - name: kube-master + group: kube-master + password: "{{ lookup('password', 'credentials/vault/kube-master length=15') }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:masters" + - name: kube-node + group: k8s-cluster + password: "{{ lookup('password', 'credentials/vault/kube-node length=15') }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:nodes" + - name: kube-proxy + group: k8s-cluster + password: "{{ lookup('password', 'credentials/vault/kube-proxy length=15') }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:node-proxier" diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml index ae67f7405..010e6bbc6 100644 --- a/roles/vault/tasks/bootstrap/ca_trust.yml +++ b/roles/vault/tasks/bootstrap/ca_trust.yml @@ -10,11 +10,11 @@ set_fact: ca_cert_path: >- {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/kube-cluster-ca.crt + /usr/local/share/ca-certificates/vault-ca.crt {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/kube-cluster-ca.crt + /etc/pki/ca-trust/source/anchors/vault-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/kube-cluster-ca.pem + /etc/ssl/certs/vault-ca.pem {%- endif %} - name: bootstrap/ca_trust | add CA to trusted CA dir diff --git a/roles/vault/tasks/bootstrap/create_mounts.yml b/roles/vault/tasks/bootstrap/create_mounts.yml new file mode 100644 index 000000000..0010c35c5 --- /dev/null +++ b/roles/vault/tasks/bootstrap/create_mounts.yml @@ -0,0 +1,12 @@ +--- +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ item.name }}" + create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" + create_mount_description: "{{ item.description }}" + create_mount_cert_dir: "{{ item.cert_dir }}" + create_mount_config_ca_needed: "{{ item.config_ca }}" + with_items: + - "{{ vault_pki_mounts.vault|combine({'config_ca': not vault_ca_cert_needed}) }}" + - "{{ vault_pki_mounts.etcd|combine({'config_ca': not vault_etcd_ca_cert_needed}) }}" diff --git a/roles/vault/tasks/bootstrap/create_etcd_role.yml b/roles/vault/tasks/bootstrap/create_roles.yml similarity index 53% rename from roles/vault/tasks/bootstrap/create_etcd_role.yml rename to roles/vault/tasks/bootstrap/create_roles.yml index 57518f944..11411d236 100644 --- a/roles/vault/tasks/bootstrap/create_etcd_role.yml +++ b/roles/vault/tasks/bootstrap/create_roles.yml @@ -1,10 +1,10 @@ --- - - include: ../shared/create_role.yml vars: - create_role_name: "{{ item.name }}" + create_role_name: "{{ item.name }}" create_role_group: "{{ item.group }}" create_role_policy_rules: "{{ item.policy_rules }}" + create_role_password: "{{ item.password }}" create_role_options: "{{ item.role_options }}" - with_items: "{{ vault_roles }}" - when: item.name == "etcd" + create_role_mount_path: "{{ mount.name }}" + with_items: "{{ mount.roles }}" diff --git a/roles/vault/tasks/bootstrap/gen_auth_ca.yml b/roles/vault/tasks/bootstrap/gen_auth_ca.yml deleted file mode 100644 index 
10313ecea..000000000 --- a/roles/vault/tasks/bootstrap/gen_auth_ca.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- - -- name: bootstrap/gen_auth_ca | Generate Root CA - uri: - url: "{{ vault_leader_url }}/v1/auth-pki/root/generate/exported" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_ca_options }}" - register: vault_auth_ca_gen - when: inventory_hostname == groups.vault|first - -- name: bootstrap/gen_auth_ca | Copy auth CA cert to Vault nodes - copy: - content: "{{ hostvars[groups.vault|first]['vault_auth_ca_gen']['json']['data']['certificate'] }}" - dest: "{{ vault_cert_dir }}/auth-ca.pem" - -- name: bootstrap/gen_auth_ca | Copy auth CA key to Vault nodes - copy: - content: "{{ hostvars[groups.vault|first]['vault_auth_ca_gen']['json']['data']['private_key'] }}" - dest: "{{ vault_cert_dir }}/auth-ca-key.pem" diff --git a/roles/vault/tasks/bootstrap/gen_ca.yml b/roles/vault/tasks/bootstrap/gen_ca.yml deleted file mode 100644 index ab1cb6345..000000000 --- a/roles/vault/tasks/bootstrap/gen_ca.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- - -- name: bootstrap/gen_ca | Ensure vault_cert_dir exists - file: - mode: 0755 - path: "{{ vault_cert_dir }}" - state: directory - -- name: bootstrap/gen_ca | Generate Root CA in vault-temp - uri: - url: "{{ vault_leader_url }}/v1/pki/root/generate/exported" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_ca_options }}" - register: vault_ca_gen - when: inventory_hostname == groups.vault|first and vault_ca_cert_needed - -- name: bootstrap/gen_ca | Copy root CA cert locally - copy: - content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['certificate'] }}" - dest: "{{ vault_cert_dir }}/ca.pem" - mode: 0644 - when: vault_ca_cert_needed - -- name: bootstrap/gen_ca | Copy root CA key locally - copy: - content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" - dest: "{{ vault_cert_dir }}/ca-key.pem" - mode: 0640 - when: vault_ca_cert_needed diff --git a/roles/vault/tasks/bootstrap/gen_vault_certs.yml b/roles/vault/tasks/bootstrap/gen_vault_certs.yml index 4a7f4ed31..ce4538571 100644 --- a/roles/vault/tasks/bootstrap/gen_vault_certs.yml +++ b/roles/vault/tasks/bootstrap/gen_vault_certs.yml @@ -1,28 +1,21 @@ --- - -- name: boostrap/gen_vault_certs | Add the vault role - uri: - url: "{{ vault_leader_url }}/v1/pki/roles/vault" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_default_role_permissions }}" - status_code: 204 - when: inventory_hostname == groups.vault|first and vault_api_cert_needed - - include: ../shared/issue_cert.yml vars: + issue_cert_common_name: "{{ vault_pki_mounts.vault.roles[0].name }}" issue_cert_alt_names: "{{ groups.vault + ['localhost'] }}" issue_cert_hosts: "{{ groups.vault }}" issue_cert_ip_sans: >- [ {%- for host in groups.vault -%} "{{ hostvars[host]['ansible_default_ipv4']['address'] }}", + {%- if hostvars[host]['ip'] is defined -%} + "{{ hostvars[host]['ip'] }}", + {%- endif -%} {%- endfor -%} "127.0.0.1","::1" ] + issue_cert_mount_path: "{{ vault_pki_mounts.vault.name }}" issue_cert_path: "{{ vault_cert_dir }}/api.pem" - issue_cert_headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - issue_cert_role: vault + issue_cert_role: "{{ vault_pki_mounts.vault.roles[0].name }}" issue_cert_url: "{{ vault_leader_url }}" when: vault_api_cert_needed diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index 98904bbe7..b87954ca7 100644 --- 
a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -1,5 +1,4 @@ --- - - include: ../shared/check_vault.yml when: inventory_hostname in groups.vault @@ -7,55 +6,60 @@ when: inventory_hostname in groups.vault - include: ../shared/find_leader.yml - when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d() - -## Sync Certs + when: inventory_hostname in groups.vault and vault_cluster_is_initialized - include: sync_vault_certs.yml when: inventory_hostname in groups.vault -## Generate Certs +- include: sync_etcd_certs.yml + when: inventory_hostname in groups.etcd -# Start a temporary instance of Vault - include: start_vault_temp.yml - when: >- - inventory_hostname == groups.vault|first and - not vault_cluster_is_initialized + when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized -# NOTE: The next 2 steps run against temp Vault and long-term Vault +- name: vault | Set fact about vault leader url + set_fact: + vault_leader_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + when: not vault_cluster_is_initialized -# Ensure PKI mount exists -- include: ../shared/pki_mount.yml - when: >- - inventory_hostname == groups.vault|first +- include: create_mounts.yml + when: inventory_hostname == groups.vault|first -# If the Root CA already exists, ensure Vault's PKI is using it -- include: ../shared/config_ca.yml +- include: ../shared/auth_backend.yml vars: - ca_name: ca - mount_name: pki - when: >- - inventory_hostname == groups.vault|first and - not vault_ca_cert_needed + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass + when: inventory_hostname == groups.vault|first -# Generate root CA certs for Vault if none exist -- include: gen_ca.yml - when: >- - inventory_hostname in groups.vault and - not vault_cluster_is_initialized and - vault_ca_cert_needed +- include: create_roles.yml + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + loop_control: + loop_var: mount + when: inventory_hostname in groups.vault + +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_pki_mounts.vault.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.vault.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.vault }}" + when: >- + inventory_hostname in groups.vault + and not vault_cluster_is_initialized + and vault_ca_cert_needed + +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_pki_mounts.etcd.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.etcd }}" + when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed -# Generate Vault API certs - include: gen_vault_certs.yml when: inventory_hostname in groups.vault and vault_api_cert_needed -# Update all host's CA bundle - include: ca_trust.yml - -## Add Etcd Role to Vault (if needed) - -- include: role_auth_cert.yml - when: vault_role_auth_method == "cert" - -- include: role_auth_userpass.yml - when: vault_role_auth_method == "userpass" diff --git a/roles/vault/tasks/bootstrap/role_auth_cert.yml b/roles/vault/tasks/bootstrap/role_auth_cert.yml deleted file mode 100644 index d92cd9d69..000000000 --- a/roles/vault/tasks/bootstrap/role_auth_cert.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- - -- include: ../shared/sync_auth_certs.yml - 
when: inventory_hostname in groups.vault - -- include: ../shared/cert_auth_mount.yml - when: inventory_hostname == groups.vault|first - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Cert-based Auth primarily for services needing to issue certificates - auth_backend_name: cert - auth_backend_type: cert - when: inventory_hostname == groups.vault|first - -- include: gen_auth_ca.yml - when: inventory_hostname in groups.vault and vault_auth_ca_cert_needed - -- include: ../shared/config_ca.yml - vars: - ca_name: auth-ca - mount_name: auth-pki - when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed - -- include: create_etcd_role.yml - when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/role_auth_userpass.yml b/roles/vault/tasks/bootstrap/role_auth_userpass.yml deleted file mode 100644 index 2ad2fbc91..000000000 --- a/roles/vault/tasks/bootstrap/role_auth_userpass.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - when: inventory_hostname == groups.vault|first - -- include: create_etcd_role.yml - when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/start_vault_temp.yml b/roles/vault/tasks/bootstrap/start_vault_temp.yml index 4a5e6bc5e..49585a5d9 100644 --- a/roles/vault/tasks/bootstrap/start_vault_temp.yml +++ b/roles/vault/tasks/bootstrap/start_vault_temp.yml @@ -1,5 +1,4 @@ --- - - name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi register: vault_temp_stop_check @@ -13,7 +12,7 @@ -v /etc/vault:/etc/vault {{ vault_image_repo }}:{{ vault_version }} server -#FIXME(mattymo): Crashes on first start with aufs docker storage. See hashicorp/docker-vault#19 +# FIXME(mattymo): Crashes on first start with aufs docker storage. 
See hashicorp/docker-vault#19 - name: bootstrap/start_vault_temp | Start again single node Vault with file backend command: docker start {{ vault_temp_container_name }} diff --git a/roles/vault/tasks/bootstrap/sync_etcd_certs.yml b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml new file mode 100644 index 000000000..599b3cd47 --- /dev/null +++ b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml @@ -0,0 +1,16 @@ +--- + +- include: ../shared/sync_file.yml + vars: + sync_file: "ca.pem" + sync_file_dir: "{{ vault_etcd_cert_dir }}" + sync_file_hosts: "{{ groups.etcd }}" + sync_file_is_cert: true + +- name: bootstrap/sync_etcd_certs | Set facts for etcd sync_file results + set_fact: + vault_etcd_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}" + +- name: bootstrap/sync_etcd_certs | Unset sync_file_results after ca.pem sync + set_fact: + sync_file_results: [] diff --git a/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/roles/vault/tasks/bootstrap/sync_vault_certs.yml index ab088753f..9e6eff05c 100644 --- a/roles/vault/tasks/bootstrap/sync_vault_certs.yml +++ b/roles/vault/tasks/bootstrap/sync_vault_certs.yml @@ -1,5 +1,4 @@ --- - - include: ../shared/sync_file.yml vars: sync_file: "ca.pem" @@ -29,4 +28,3 @@ - name: bootstrap/sync_vault_certs | Unset sync_file_results after api.pem sync set_fact: sync_file_results: [] - diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml new file mode 100644 index 000000000..d64fa0bae --- /dev/null +++ b/roles/vault/tasks/cluster/create_mounts.yml @@ -0,0 +1,13 @@ +--- +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ item.name }}" + create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" + create_mount_description: "{{ item.description }}" + create_mount_cert_dir: "{{ item.cert_dir }}" + create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + - "{{ vault_pki_mounts.kube }}" diff --git a/roles/vault/tasks/cluster/create_roles.yml b/roles/vault/tasks/cluster/create_roles.yml index a135137da..468229fd4 100644 --- a/roles/vault/tasks/cluster/create_roles.yml +++ b/roles/vault/tasks/cluster/create_roles.yml @@ -1,9 +1,10 @@ --- - - include: ../shared/create_role.yml vars: create_role_name: "{{ item.name }}" create_role_group: "{{ item.group }}" + create_role_password: "{{ item.password }}" create_role_policy_rules: "{{ item.policy_rules }}" create_role_options: "{{ item.role_options }}" - with_items: "{{ vault_roles|d([]) }}" + create_role_mount_path: "{{ mount.name }}" + with_items: "{{ mount.roles }}" diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index db97dd078..94af5e5dc 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -1,13 +1,10 @@ --- - - include: ../shared/check_vault.yml when: inventory_hostname in groups.vault - include: ../shared/check_etcd.yml when: inventory_hostname in groups.vault -## Vault Cluster Setup - - include: configure.yml when: inventory_hostname in groups.vault @@ -26,19 +23,29 @@ - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault -- include: ../shared/pki_mount.yml +- include: create_mounts.yml when: inventory_hostname == groups.vault|first -- include: ../shared/config_ca.yml +- include: ../shared/gen_ca.yml vars: - ca_name: ca - mount_name: pki + gen_ca_cert_dir: "{{ 
vault_pki_mounts.kube.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.kube }}" + when: inventory_hostname in groups.vault + +- include: ../shared/auth_backend.yml + vars: + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass when: inventory_hostname == groups.vault|first -## Vault Policies, Roles, and Auth Backends - -- include: role_auth_cert.yml - when: vault_role_auth_method == "cert" - -- include: role_auth_userpass.yml - when: vault_role_auth_method == "userpass" +- include: create_roles.yml + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + - "{{ vault_pki_mounts.kube }}" + loop_control: + loop_var: mount + when: inventory_hostname in groups.vault diff --git a/roles/vault/tasks/cluster/role_auth_cert.yml b/roles/vault/tasks/cluster/role_auth_cert.yml deleted file mode 100644 index 9f186e3ff..000000000 --- a/roles/vault/tasks/cluster/role_auth_cert.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- - -- include: ../shared/cert_auth_mount.yml - when: inventory_hostname == groups.vault|first - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Cert-based Auth primarily for services needing to issue certificates - auth_backend_name: cert - auth_backend_type: cert - when: inventory_hostname == groups.vault|first - -- include: ../shared/config_ca.yml - vars: - ca_name: auth-ca - mount_name: auth-pki - when: inventory_hostname == groups.vault|first - -- include: create_roles.yml diff --git a/roles/vault/tasks/cluster/role_auth_userpass.yml b/roles/vault/tasks/cluster/role_auth_userpass.yml deleted file mode 100644 index ac3b2c6c1..000000000 --- a/roles/vault/tasks/cluster/role_auth_userpass.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - when: inventory_hostname == groups.vault|first - -- include: create_roles.yml diff --git a/roles/vault/tasks/shared/auth_backend.yml b/roles/vault/tasks/shared/auth_backend.yml index ad5b191c9..82a4c94fb 100644 --- a/roles/vault/tasks/shared/auth_backend.yml +++ b/roles/vault/tasks/shared/auth_backend.yml @@ -1,11 +1,10 @@ --- - - name: shared/auth_backend | Test if the auth backend exists uri: url: "{{ vault_leader_url }}/v1/sys/auth/{{ auth_backend_path }}/tune" headers: "{{ vault_headers }}" validate_certs: false - ignore_errors: true + ignore_errors: true register: vault_auth_backend_check - name: shared/auth_backend | Add the cert auth backend if needed diff --git a/roles/vault/tasks/shared/cert_auth_mount.yml b/roles/vault/tasks/shared/cert_auth_mount.yml index 9710aa7ca..6ba303d3b 100644 --- a/roles/vault/tasks/shared/cert_auth_mount.yml +++ b/roles/vault/tasks/shared/cert_auth_mount.yml @@ -1,14 +1,13 @@ --- -- include: ../shared/mount.yml +- include: ../shared/pki_mount.yml vars: - mount_name: auth-pki - mount_options: + pki_mount_path: auth-pki + pki_mount_options: description: PKI mount to generate certs for the Cert Auth Backend config: default_lease_ttl: "{{ vault_default_lease_ttl }}" max_lease_ttl: "{{ vault_max_lease_ttl }}" - type: pki - name: shared/auth_mount | Create a dummy role for issuing certs from auth-pki uri: diff --git 
a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml index 257843d95..83328768a 100644 --- a/roles/vault/tasks/shared/check_vault.yml +++ b/roles/vault/tasks/shared/check_vault.yml @@ -1,5 +1,4 @@ --- - # Stop temporary Vault if it's running (can linger if playbook fails out) - name: stop vault-temp container shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }} @@ -22,8 +21,8 @@ vault_is_running: "{{ vault_local_service_health|succeeded }}" vault_is_initialized: "{{ vault_local_service_health.get('json', {}).get('initialized', false) }}" vault_is_sealed: "{{ vault_local_service_health.get('json', {}).get('sealed', true) }}" - #vault_in_standby: "{{ vault_local_service_health.get('json', {}).get('standby', true) }}" - #vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}" + # vault_in_standby: "{{ vault_local_service_health.get('json', {}).get('standby', true) }}" + # vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}" - name: check_vault | Set fact about the Vault cluster's initialization state set_fact: diff --git a/roles/vault/tasks/shared/config_ca.yml b/roles/vault/tasks/shared/config_ca.yml index 79c972b4d..0ef34e7b8 100644 --- a/roles/vault/tasks/shared/config_ca.yml +++ b/roles/vault/tasks/shared/config_ca.yml @@ -1,12 +1,11 @@ --- - - name: config_ca | Read root CA cert for Vault - command: "cat /etc/vault/ssl/{{ ca_name }}.pem" + command: "cat {{ config_ca_ca_pem }}" register: vault_ca_cert_cat - name: config_ca | Pull current CA cert from Vault uri: - url: "{{ vault_leader_url }}/v1/{{ mount_name }}/ca/pem" + url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/ca/pem" headers: "{{ vault_headers }}" return_content: true status_code: 200,204 @@ -14,13 +13,13 @@ register: vault_pull_current_ca - name: config_ca | Read root CA key for Vault - command: "cat /etc/vault/ssl/{{ ca_name }}-key.pem" + command: "cat {{ config_ca_ca_key }}" register: vault_ca_key_cat when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.content.strip() - name: config_ca | Configure pki mount to use the found root CA cert and key uri: - url: "{{ vault_leader_url }}/v1/{{ mount_name }}/config/ca" + url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/config/ca" headers: "{{ vault_headers }}" method: POST body_format: json diff --git a/roles/vault/tasks/shared/create_mount.yml b/roles/vault/tasks/shared/create_mount.yml new file mode 100644 index 000000000..0b12dce24 --- /dev/null +++ b/roles/vault/tasks/shared/create_mount.yml @@ -0,0 +1,16 @@ +--- +- include: ../shared/pki_mount.yml + vars: + pki_mount_path: "{{ create_mount_path }}" + pki_mount_options: + config: + default_lease_ttl: "{{ create_mount_default_lease_ttl }}" + max_lease_ttl: "{{ create_mount_max_lease_ttl }}" + description: "{{ create_mount_description }}" + +- include: ../shared/config_ca.yml + vars: + config_ca_ca_pem: "{{ create_mount_cert_dir }}/ca.pem" + config_ca_ca_key: "{{ create_mount_cert_dir }}/ca-key.pem" + config_ca_mount_path: "{{ create_mount_path }}" + when: create_mount_config_ca_needed diff --git a/roles/vault/tasks/shared/create_role.yml b/roles/vault/tasks/shared/create_role.yml index c39fafe8c..d76e73f13 100644 --- a/roles/vault/tasks/shared/create_role.yml +++ b/roles/vault/tasks/shared/create_role.yml @@ -1,5 +1,4 @@ --- - # The JSON inside JSON here is intentional (Vault API wants it) - name: create_role | Create a policy for the new role allowing 
issuing uri: @@ -12,19 +11,20 @@ {%- if create_role_policy_rules|d("default") == "default" -%} {{ { 'path': { - 'pki/issue/' + create_role_name: {'policy': 'write'}, - 'pki/roles/' + create_role_name: {'policy': 'read'} + create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'}, + create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'} }} | to_json + '\n' }} {%- else -%} {{ create_role_policy_rules | to_json + '\n' }} {%- endif -%} status_code: 204 - when: inventory_hostname == groups[create_role_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true -- name: create_role | Create the new role in the pki mount +- name: create_role | Create {{ create_role_name }} role in the {{ create_role_mount_path }} pki mount uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/pki/roles/{{ create_role_name }}" + url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/{{ create_role_mount_path }}/roles/{{ create_role_name }}" headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" method: POST body_format: json @@ -35,40 +35,14 @@ {{ create_role_options }} {%- endif -%} status_code: 204 - when: inventory_hostname == groups[create_role_group]|first - -## Cert based auth method - -- include: gen_cert.yml - vars: - gen_cert_copy_ca: true - gen_cert_hosts: "{{ groups[create_role_group] }}" - gen_cert_mount: "auth-pki" - gen_cert_path: "{{ vault_roles_dir }}/{{ create_role_name }}/issuer.pem" - gen_cert_vault_headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - gen_cert_vault_role: "dummy" - gen_cert_vault_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" - when: vault_role_auth_method == "cert" and inventory_hostname in groups[create_role_group] - -- name: create_role | Insert the auth-pki CA as the authenticating CA for that role - uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/cert/certs/{{ create_role_name }}" - headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - method: POST - body_format: json - body: - certificate: "{{ hostvars[groups[create_role_group]|first]['gen_cert_result']['json']['data']['issuing_ca'] }}" - policies: "{{ create_role_name }}" - status_code: 204 - when: vault_role_auth_method == "cert" and inventory_hostname == groups[create_role_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true ## Userpass based auth method - include: gen_userpass.yml vars: - gen_userpass_group: "{{ create_role_group }}" - gen_userpass_password: "{{ create_role_password|d(''|to_uuid) }}" + gen_userpass_password: "{{ create_role_password }}" gen_userpass_policies: "{{ create_role_name }}" gen_userpass_role: "{{ create_role_name }}" gen_userpass_username: "{{ create_role_name }}" - when: vault_role_auth_method == "userpass" and inventory_hostname in groups[create_role_group] diff --git a/roles/vault/tasks/shared/find_leader.yml b/roles/vault/tasks/shared/find_leader.yml index 1aaa8513e..3afee482d 100644 --- a/roles/vault/tasks/shared/find_leader.yml +++ b/roles/vault/tasks/shared/find_leader.yml @@ -15,7 +15,7 @@ vault_leader_url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://{{ item }}:{{ vault_port }}" with_items: "{{ groups.vault }}" when: "hostvars[item]['vault_leader_check'].get('status') in [200,503]" - #run_once: true + # run_once: true - name: find_leader| show vault_leader_url debug: var=vault_leader_url verbosity=2 diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml new 
file mode 100644 index 000000000..291f42734 --- /dev/null +++ b/roles/vault/tasks/shared/gen_ca.yml @@ -0,0 +1,29 @@ +--- +- name: "bootstrap/gen_ca | Ensure cert_dir {{ gen_ca_cert_dir }} exists" + file: + mode: 0755 + path: "{{ gen_ca_cert_dir }}" + state: directory + +- name: "bootstrap/gen_ca | Generate {{ gen_ca_mount_path }} root CA" + uri: + url: "{{ vault_leader_url }}/v1/{{ gen_ca_mount_path }}/root/generate/exported" + headers: "{{ gen_ca_vault_headers }}" + method: POST + body_format: json + body: "{{ gen_ca_vault_options }}" + register: vault_ca_gen + delegate_to: "{{ groups.vault|first }}" + run_once: true + +- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA cert locally" + copy: + content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['certificate'] }}" + dest: "{{ gen_ca_cert_dir }}/ca.pem" + mode: 0644 + +- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally" + copy: + content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" + dest: "{{ gen_ca_cert_dir }}/ca-key.pem" + mode: 0640 diff --git a/roles/vault/tasks/shared/gen_userpass.yml b/roles/vault/tasks/shared/gen_userpass.yml index ab3d171b8..5def39d0e 100644 --- a/roles/vault/tasks/shared/gen_userpass.yml +++ b/roles/vault/tasks/shared/gen_userpass.yml @@ -1,5 +1,4 @@ --- - - name: shared/gen_userpass | Create the Username/Password combo for the role uri: url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/users/{{ gen_userpass_username }}" @@ -11,20 +10,19 @@ password: "{{ gen_userpass_password }}" policies: "{{ gen_userpass_role }}" status_code: 204 - when: inventory_hostname == groups[gen_userpass_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true - name: shared/gen_userpass | Ensure destination directory exists file: path: "{{ vault_roles_dir }}/{{ gen_userpass_role }}" state: directory - when: inventory_hostname in groups[gen_userpass_group] - name: shared/gen_userpass | Copy credentials to all hosts in the group copy: content: > - {{ + {{ {'username': gen_userpass_username, 'password': gen_userpass_password} | to_nice_json(indent=4) }} dest: "{{ vault_roles_dir }}/{{ gen_userpass_role }}/userpass" - when: inventory_hostname in groups[gen_userpass_group] diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 4854e8b9e..24db59957 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -11,30 +11,64 @@ # issue_cert_file_mode: Mode of the placed cert file # issue_cert_file_owner: Owner of the placed cert file and directory # issue_cert_format: Format for returned data. 
Can be pem, der, or pem_bundle -# issue_cert_headers: Headers passed into the issue request # issue_cert_hosts: List of hosts to distribute the cert to # issue_cert_ip_sans: Requested IP Subject Alternative Names, in a list -# issue_cert_mount: Mount point in Vault to make the request to +# issue_cert_mount_path: Mount point in Vault to make the request to # issue_cert_path: Full path to the cert, include its name # issue_cert_role: The Vault role to issue the cert with # issue_cert_url: Url to reach Vault, including protocol and port -- name: issue_cert | debug who issues certs - debug: - msg: "{{ issue_cert_hosts }} issues certs" - - - name: issue_cert | Ensure target directory exists file: - path: "{{ issue_cert_path | dirname }}" + path: "{{ issue_cert_path | dirname }}" state: directory group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_dir_mode | d('0755') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: "issue_cert | Generate the cert for {{ issue_cert_role }}" +- name: "issue_cert | Read in the local credentials" + command: cat {{ vault_roles_dir }}/{{ issue_cert_role }}/userpass + register: vault_creds_cat + delegate_to: "{{ groups.vault|first }}" + run_once: true + +- name: gen_certs_vault | Set facts for read Vault Creds + set_fact: + user_vault_creds: "{{ vault_creds_cat.stdout|from_json }}" + delegate_to: "{{ groups.vault|first }}" + run_once: true + +- name: gen_certs_vault | Log into Vault and obtain a token uri: - url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}" + url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ user_vault_creds.username }}" + headers: + Accept: application/json + Content-Type: application/json + method: POST + body_format: json + body: + password: "{{ user_vault_creds.password }}" + register: vault_login_result + delegate_to: "{{ groups.vault|first }}" + run_once: true + +- name: gen_certs_vault | Set fact for vault_client_token + set_fact: + vault_client_token: "{{ vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" + run_once: true + +- name: gen_certs_vault | Set fact for Vault API token + set_fact: + issue_cert_headers: + Accept: application/json + Content-Type: application/json + X-Vault-Token: "{{ vault_client_token }}" + run_once: true + when: vault_client_token != "" + +- name: "issue_cert | Generate {{ issue_cert_path }} for {{ issue_cert_role }} role" + uri: + url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount_path|d('pki') }}/issue/{{ issue_cert_role }}" headers: "{{ issue_cert_headers }}" method: POST body_format: json @@ -45,11 +79,7 @@ ip_sans: "{{ issue_cert_ip_sans | default([]) | join(',') }}" register: issue_cert_result delegate_to: "{{ issue_cert_hosts|first }}" - -- name: issue_cert | results - debug: - msg: "{{ issue_cert_result }}" - + run_once: true - name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts" copy: @@ -75,3 +105,11 @@ mode: "{{ issue_cert_file_mode | d('0644') }}" owner: "{{ issue_cert_file_owner | d('root') }}" when: issue_cert_copy_ca|default(false) + +- name: issue_cert | Copy certificate serial to all hosts + copy: + content: "{{ issue_cert_result['json']['data']['serial_number'] }}" + dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" + group: "{{ issue_cert_file_group | d('root' )}}" + mode: "{{ issue_cert_file_mode | d('0640') }}" + owner: "{{ issue_cert_file_owner | d('root') }}" diff --git a/roles/vault/tasks/shared/mount.yml
diff --git a/roles/vault/tasks/shared/mount.yml b/roles/vault/tasks/shared/mount.yml
deleted file mode 100644
index b98b45c57..000000000
--- a/roles/vault/tasks/shared/mount.yml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-
-- name: shared/mount | Test if PKI mount exists
-  uri:
-    url: "{{ vault_leader_url }}/v1/sys/mounts/{{ mount_name }}/tune"
-    headers: "{{ vault_headers }}"
-  ignore_errors: true
-  register: vault_pki_mount_check
-
-- name: shared/mount | Mount PKI mount if needed
-  uri:
-    url: "{{ vault_leader_url }}/v1/sys/mounts/{{ mount_name }}"
-    headers: "{{ vault_headers }}"
-    method: POST
-    body_format: json
-    body: "{{ mount_options|d() }}"
-    status_code: 204
-  when: vault_pki_mount_check|failed
diff --git a/roles/vault/tasks/shared/pki_mount.yml b/roles/vault/tasks/shared/pki_mount.yml
index 31faef434..3df56e0f8 100644
--- a/roles/vault/tasks/shared/pki_mount.yml
+++ b/roles/vault/tasks/shared/pki_mount.yml
@@ -1,11 +1,27 @@
 ---
+- name: "shared/mount | Test if {{ pki_mount_path }} PKI mount exists"
+  uri:
+    url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}/tune"
+    headers: "{{ vault_headers }}"
+  ignore_errors: true
+  register: vault_pki_mount_check

-- include: mount.yml
-  vars:
-    mount_name: pki
-    mount_options:
-      config:
-        default_lease_ttl: "{{ vault_default_lease_ttl }}"
-        max_lease_ttl: "{{ vault_max_lease_ttl }}"
-      description: The default PKI mount for Kubernetes
-      type: pki
+- name: shared/mount | Set pki mount type
+  set_fact:
+    mount_options: "{{ pki_mount_options | combine({'type': 'pki'}) }}"
+  when: vault_pki_mount_check|failed
+
+- name: shared/mount | Mount {{ pki_mount_path }} PKI mount if needed
+  uri:
+    url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}"
+    headers: "{{ vault_headers }}"
+    method: POST
+    body_format: json
+    body: "{{ mount_options|d() }}"
+    status_code: 204
+  when: vault_pki_mount_check|failed
+
+- name: shared/mount | Unset mount options
+  set_fact:
+    mount_options: {}
+  when: vault_pki_mount_check|failed
diff --git a/roles/vault/templates/docker.service.j2 b/roles/vault/templates/docker.service.j2
index c355b7f01..f99035c77 100644
--- a/roles/vault/templates/docker.service.j2
+++ b/roles/vault/templates/docker.service.j2
@@ -21,6 +21,7 @@ ExecStart={{ docker_bin_dir }}/docker run \
 --cap-add=IPC_LOCK \
 -v {{ vault_cert_dir }}:{{ vault_cert_dir }} \
 -v {{ vault_config_dir }}:{{ vault_config_dir }} \
+-v {{ vault_etcd_cert_dir }}:{{ vault_etcd_cert_dir }} \
 -v {{ vault_log_dir }}:/vault/logs \
 -v {{ vault_roles_dir }}:{{ vault_roles_dir }} \
 -v {{ vault_secrets_dir }}:{{ vault_secrets_dir }} \
diff --git a/roles/vault/templates/rkt.service.j2 b/roles/vault/templates/rkt.service.j2
index 42b9458ac..b0e91dc0f 100644
--- a/roles/vault/templates/rkt.service.j2
+++ b/roles/vault/templates/rkt.service.j2
@@ -24,6 +24,8 @@ ExecStart=/usr/bin/rkt run \
 --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \
 --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \
 --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \
+--volume=vault-etcd-cert-dir,kind=host,source={{ vault_etcd_cert_dir }} \
+--mount=volume=vault-etcd-cert-dir,target={{ vault_etcd_cert_dir }} \
 docker://{{ vault_image_repo }}:{{ vault_image_tag }} \
 --name={{ vault_container_name }} --net=host \
 --caps-retain=CAP_IPC_LOCK \
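The pki_mount.yml rewrite above inlines what the deleted mount.yml used to do: probe sys/mounts/<path>/tune, and create the mount only when the probe fails. A minimal sketch of that check-then-create idiom under assumed placeholder values (the vault.example.com address, the kube-ca mount path, the vault_token variable, and the TTL are illustrative, not from this patch):

---
# Sketch only: create a Vault PKI mount only if it does not already exist.
- hosts: localhost
  gather_facts: false
  vars:
    vault_url: https://vault.example.com:8200    # placeholder address
    mount_path: kube-ca                          # placeholder mount path
  tasks:
    - name: Probe the mount; an error status means it is absent
      uri:
        url: "{{ vault_url }}/v1/sys/mounts/{{ mount_path }}/tune"
        headers:
          X-Vault-Token: "{{ vault_token }}"     # assumes a token is already available
      ignore_errors: true
      register: mount_check

    - name: Mount the PKI backend only when the probe failed
      uri:
        url: "{{ vault_url }}/v1/sys/mounts/{{ mount_path }}"
        method: POST
        headers:
          X-Vault-Token: "{{ vault_token }}"
        body_format: json
        body:
          type: pki
          config:
            max_lease_ttl: 87600h                # placeholder TTL
        status_code: 204
      when: mount_check|failed

Parameterizing the mount path this way is what lets the role manage separate PKI mounts (and the new vault_etcd_cert_dir bind mounts) instead of a single hardcoded pki mount.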
diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml
index 1a82c50d7..afa757719 100644
--- a/tests/cloud_playbooks/create-gce.yml
+++ b/tests/cloud_playbooks/create-gce.yml
@@ -34,6 +34,10 @@
       tags: "build-{{test_name}},{{kube_network_plugin}}"
     register: gce

+  - name: Add instances to host group
+    add_host: hostname={{item.name}} ansible_host={{item.public_ip}} groupname="waitfor_hosts"
+    with_items: '{{gce.instance_data}}'
+
   - name: Template the inventory
     template:
       src: ../templates/inventory-gce.j2
@@ -51,6 +55,10 @@
       dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml"
     when: mode in ['scale', 'separate-scale', 'ha-scale']
-  - name: Wait for SSH to come up
-    wait_for: host={{item.public_ip}} port=22 delay=30 timeout=180 state=started
-    with_items: "{{gce.instance_data}}"
+
+- name: Wait for instances
+  hosts: "waitfor_hosts"
+  gather_facts: false
+  tasks:
+    - name: Wait for SSH to come up.
+      local_action: wait_for host={{inventory_hostname}} port=22 delay=5 timeout=240 state=started
diff --git a/tests/requirements.txt b/tests/requirements.txt
new file mode 100644
index 000000000..77b7f5868
--- /dev/null
+++ b/tests/requirements.txt
@@ -0,0 +1,5 @@
+-r ../requirements.txt
+yamllint
+apache-libcloud==0.20.1
+boto==2.9.0
+tox
diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml
index 8ca19e196..5b053fd4b 100644
--- a/tests/testcases/010_check-apiserver.yml
+++ b/tests/testcases/010_check-apiserver.yml
@@ -6,6 +6,6 @@
     uri:
       url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1"
      user: kube
-      password: changeme
+      password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}"
      validate_certs: no
      status_code: 200
diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml
index ee5f60785..7c934c592 100644
--- a/tests/testcases/030_check-network.yml
+++ b/tests/testcases/030_check-network.yml
@@ -16,7 +16,7 @@
     shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
     register: get_pods

-  - debug: msg="{{get_pods.stdout}}"
+  - debug: msg="{{get_pods.stdout.split('\n')}}"

   - name: Get pod names
     shell: "{{bin_dir}}/kubectl get pods -o json"
diff --git a/uploads.yml b/uploads.yml
deleted file mode 100644
index 5544f4588..000000000
--- a/uploads.yml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: localhost
-  roles:
-    - {role: uploads}
-
-# TEST download
-- hosts: localhost
-  vars:
-    local_release_dir: /tmp/from_gcloud
-  roles:
-    - {role: download}
\ No newline at end of file
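A note on the password lookup introduced in 010_check-apiserver.yml: Ansible's password lookup generates a random value and writes it to the given path on first use, then re-reads the same file on later runs, so repeated plays see an identical value; presumably this is what lets the testcase agree with the kube user password generated at deploy time when both run from the same checkout. A minimal sketch of that behavior; the /tmp path below is illustrative, not the path this patch uses:

---
# Sketch only: the password lookup creates the file on first use and
# re-reads it afterwards, so repeated lookups return the identical value.
- hosts: localhost
  gather_facts: false
  tasks:
    - name: Generate (or re-read) a credential file
      set_fact:
        demo_pass: "{{ lookup('password', '/tmp/demo_credentials/kube_user length=15 chars=ascii_letters,digits') }}"

    - name: A second lookup returns the same value
      debug:
        msg: "{{ demo_pass == lookup('password', '/tmp/demo_credentials/kube_user length=15 chars=ascii_letters,digits') }}"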