From 5efda3eda900eedf584bd1f5c5d68d7afddc14f7 Mon Sep 17 00:00:00 2001 From: Vijay Katam Date: Wed, 9 Aug 2017 15:49:53 -0700 Subject: [PATCH 01/64] Configurable docker yum repos, systemd fix * Make yum repos used for installing docker rpms configurable * TasksMax is only supported in systemd version >= 226 * Change to systemd file should restart docker --- roles/docker/defaults/main.yml | 3 +++ roles/docker/tasks/systemd.yml | 6 ++++++ roles/docker/templates/docker.service.j2 | 2 ++ roles/docker/templates/rh_docker.repo.j2 | 4 ++-- 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e262d908a..be1921b85 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -10,3 +10,6 @@ docker_repo_info: repos: docker_dns_servers_strict: yes + +docker_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7' +docker_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg' diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index 1275de5d7..88b80c6e0 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -10,11 +10,17 @@ dest: /etc/systemd/system/docker.service.d/http-proxy.conf when: http_proxy is defined or https_proxy is defined or no_proxy is defined +- name: get systemd version + command: rpm -q --qf '%{V}\n' systemd + register: systemd_version + when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) + - name: Write docker.service systemd file template: src: docker.service.j2 dest: /etc/systemd/system/docker.service register: docker_service_file + notify: restart docker when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) - name: Write docker.service systemd file for atomic diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index 54e4b7c06..c8951fa8e 100644 --- 
a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -24,7 +24,9 @@ ExecStart={{ docker_bin_dir }}/docker daemon \ $DOCKER_NETWORK_OPTIONS \ $DOCKER_DNS_OPTIONS \ $INSECURE_REGISTRY +{% if systemd_version.stdout|int >= 226 %} TasksMax=infinity +{% endif %} LimitNOFILE=1048576 LimitNPROC=1048576 LimitCORE=infinity diff --git a/roles/docker/templates/rh_docker.repo.j2 b/roles/docker/templates/rh_docker.repo.j2 index e783c0ddf..7cb728625 100644 --- a/roles/docker/templates/rh_docker.repo.j2 +++ b/roles/docker/templates/rh_docker.repo.j2 @@ -1,7 +1,7 @@ [dockerrepo] name=Docker Repository -baseurl=https://yum.dockerproject.org/repo/main/centos/7 +baseurl={{ docker_rh_repo_base_url }} enabled=1 gpgcheck=1 -gpgkey=https://yum.dockerproject.org/gpg +gpgkey={{ docker_rh_repo_gpgkey }} {% if http_proxy is defined %}proxy={{ http_proxy }}{% endif %} From 7ad552311325f53b1af6cfc625c16734578abc64 Mon Sep 17 00:00:00 2001 From: Vijay Katam Date: Thu, 10 Aug 2017 13:49:14 -0700 Subject: [PATCH 02/64] restrict rpm query to redhat --- roles/docker/tasks/systemd.yml | 2 +- roles/docker/templates/docker.service.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index 88b80c6e0..6880d9e27 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -13,7 +13,7 @@ - name: get systemd version command: rpm -q --qf '%{V}\n' systemd register: systemd_version - when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) + when: ansible_os_family == "RedHat" and not is_atomic - name: Write docker.service systemd file template: diff --git a/roles/docker/templates/docker.service.j2 b/roles/docker/templates/docker.service.j2 index c8951fa8e..29a80c107 100644 --- a/roles/docker/templates/docker.service.j2 +++ b/roles/docker/templates/docker.service.j2 @@ -24,7 +24,7 @@ ExecStart={{ docker_bin_dir }}/docker daemon \ 
$DOCKER_NETWORK_OPTIONS \ $DOCKER_DNS_OPTIONS \ $INSECURE_REGISTRY -{% if systemd_version.stdout|int >= 226 %} +{% if ansible_os_family == "RedHat" and systemd_version.stdout|int >= 226 %} TasksMax=infinity {% endif %} LimitNOFILE=1048576 From 55ba81fee56f5e33b597da3b63e4ef5f1ce48165 Mon Sep 17 00:00:00 2001 From: Vijay Katam Date: Mon, 14 Aug 2017 12:31:44 -0700 Subject: [PATCH 03/64] Add changed_when: false to rpm query --- roles/docker/tasks/systemd.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/docker/tasks/systemd.yml b/roles/docker/tasks/systemd.yml index 6880d9e27..ec4bbf9ab 100644 --- a/roles/docker/tasks/systemd.yml +++ b/roles/docker/tasks/systemd.yml @@ -14,6 +14,7 @@ command: rpm -q --qf '%{V}\n' systemd register: systemd_version when: ansible_os_family == "RedHat" and not is_atomic + changed_when: false - name: Write docker.service systemd file template: From 9f9f70aade00198bf19f3fa9f32485018ac783e9 Mon Sep 17 00:00:00 2001 From: Erik Stidham Date: Mon, 7 Aug 2017 10:33:37 -0500 Subject: [PATCH 04/64] Update Calico to 2.4.1 release. 
- Switched Calico images to be pulled from quay.io - Updated Canal too --- roles/download/defaults/main.yml | 17 +++++++++-------- .../calico/templates/calico-node.service.j2 | 1 + .../calico/templates/calico.env.j2 | 5 +++++ .../canal/templates/canal-node.yml.j2 | 3 +++ roles/uploads/defaults/main.yml | 4 ++-- 5 files changed, 20 insertions(+), 10 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index e5a4aa31b..4691818c1 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -22,9 +22,10 @@ kube_version: v1.7.3 etcd_version: v3.2.4 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download -calico_version: "v1.1.3" -calico_cni_version: "v1.8.0" -calico_policy_version: "v0.5.4" +calico_version: "v2.4.1" +calico_ctl_version: "v1.4.0" +calico_cni_version: "v1.10.0" +calico_policy_version: "v0.7.0" weave_version: 2.0.1 flannel_version: v0.8.0 pod_infra_version: 3.0 @@ -42,13 +43,13 @@ etcd_image_repo: "quay.io/coreos/etcd" etcd_image_tag: "{{ etcd_version }}" flannel_image_repo: "quay.io/coreos/flannel" flannel_image_tag: "{{ flannel_version }}" -calicoctl_image_repo: "calico/ctl" -calicoctl_image_tag: "{{ calico_version }}" -calico_node_image_repo: "calico/node" +calicoctl_image_repo: "quay.io/calico/ctl" +calicoctl_image_tag: "{{ calico_ctl_version }}" +calico_node_image_repo: "quay.io/calico/node" calico_node_image_tag: "{{ calico_version }}" -calico_cni_image_repo: "calico/cni" +calico_cni_image_repo: "quay.io/calico/cni" calico_cni_image_tag: "{{ calico_cni_version }}" -calico_policy_image_repo: "calico/kube-policy-controller" +calico_policy_image_repo: "quay.io/calico/kube-policy-controller" calico_policy_image_tag: "{{ calico_policy_version }}" calico_rr_image_repo: "quay.io/calico/routereflector" calico_rr_image_tag: "v0.3.0" diff --git a/roles/network_plugin/calico/templates/calico-node.service.j2 
b/roles/network_plugin/calico/templates/calico-node.service.j2 index 015c91b08..73bb757ba 100644 --- a/roles/network_plugin/calico/templates/calico-node.service.j2 +++ b/roles/network_plugin/calico/templates/calico-node.service.j2 @@ -11,6 +11,7 @@ ExecStart={{ docker_bin_dir }}/docker run --net=host --privileged \ -e HOSTNAME=${CALICO_HOSTNAME} \ -e IP=${CALICO_IP} \ -e IP6=${CALICO_IP6} \ + -e CLUSTER_TYPE=${CLUSTER_TYPE} \ -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \ -e FELIX_DEFAULTENDPOINTTOHOSTACTION={{ calico_endpoint_to_host_action|default('RETURN') }} \ -e AS=${CALICO_AS} \ diff --git a/roles/network_plugin/calico/templates/calico.env.j2 b/roles/network_plugin/calico/templates/calico.env.j2 index 83cf8f291..e438060af 100644 --- a/roles/network_plugin/calico/templates/calico.env.j2 +++ b/roles/network_plugin/calico/templates/calico.env.j2 @@ -4,6 +4,11 @@ ETCD_CERT_FILE="{{ calico_cert_dir }}/cert.crt" ETCD_KEY_FILE="{{ calico_cert_dir }}/key.pem" CALICO_IP="{{ip | default(ansible_default_ipv4.address) }}" CALICO_IP6="" +{% if calico_network_backend is defined and calico_network_backend == 'none' %} +CLUSTER_TYPE="kubespray" +{% else %} +CLUSTER_TYPE="kubespray,bgp" +{% endif %} {% if calico_network_backend is defined %} CALICO_NETWORKING_BACKEND="{{calico_network_backend }}" {% endif %} diff --git a/roles/network_plugin/canal/templates/canal-node.yml.j2 b/roles/network_plugin/canal/templates/canal-node.yml.j2 index 37baf06e0..b4d8e67f0 100644 --- a/roles/network_plugin/canal/templates/canal-node.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yml.j2 @@ -143,6 +143,9 @@ spec: # Disable Calico BGP. Calico is simply enforcing policy. - name: CALICO_NETWORKING value: "false" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "kubespray,canal" # Disable file logging so `kubectl logs` works. 
- name: CALICO_DISABLE_FILE_LOGGING value: "true" diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml index 303a2d050..bc0a17f32 100644 --- a/roles/uploads/defaults/main.yml +++ b/roles/uploads/defaults/main.yml @@ -3,8 +3,8 @@ local_release_dir: /tmp # Versions etcd_version: v3.0.17 -calico_version: v0.23.0 -calico_cni_version: v1.5.6 +calico_version: v2.4.1 +calico_cni_version: v1.10.0 weave_version: v2.0.1 # Download URL's From ecb6dc3679806b0c17813c38cc9cae305a719a7e Mon Sep 17 00:00:00 2001 From: Ian Lewis Date: Wed, 23 Aug 2017 22:44:11 +0900 Subject: [PATCH 05/64] Register standalone master w/ taints (#1426) If Kubernetes > 1.6 register standalone master nodes w/ a node-role.kubernetes.io/master=:NoSchedule taint to allow for more flexible scheduling rather than just marking unschedulable. --- roles/kubernetes/node/templates/kubelet.j2 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 822153f39..cb5935178 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -36,8 +36,14 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %} {% if standalone_kubelet|bool %} {# We are on a master-only host. Make the master unschedulable in this case. #} +{% if kube_version | version_compare('v1.6', '>=') %} +{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. 
#} +{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=node-role.kubernetes.io/master=:NoSchedule{% endset %} +{% else %} +{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #} {% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %} {% endif %} +{% endif %} {# Kubelet node labels #} {% if inventory_hostname in groups['kube-master'] %} From 8b151d12b91f1cf2d56a7cba3a9822bfec4c68e1 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Thu, 24 Aug 2017 04:09:52 -0500 Subject: [PATCH 06/64] Adding yamllinter to ci steps (#1556) * Adding yaml linter to ci check * Minor linting fixes from yamllint * Changing CI to install python pkgs from requirements.txt - adding in a secondary requirements.txt for tests - moving yamllint to tests requirements --- .gitlab-ci.yml | 17 ++-- .yamllint | 16 ++++ roles/bootstrap-os/tasks/bootstrap-coreos.yml | 1 - roles/bootstrap-os/tasks/main.yml | 1 - roles/bootstrap-os/tasks/setup-pipelining.yml | 1 - roles/dnsmasq/defaults/main.yml | 8 +- roles/dnsmasq/tasks/main.yml | 1 - .../dnsmasq/templates/dnsmasq-autoscaler.yml | 26 +++--- roles/dnsmasq/templates/dnsmasq-deploy.yml | 2 - roles/docker/defaults/main.yml | 1 + roles/docker/handlers/main.yml | 2 +- roles/docker/tasks/main.yml | 14 +-- roles/docker/tasks/set_facts_dns.yml | 2 +- roles/docker/vars/debian.yml | 1 + roles/docker/vars/fedora-20.yml | 1 + roles/docker/vars/fedora.yml | 1 + roles/docker/vars/redhat.yml | 3 +- roles/download/defaults/main.yml | 2 +- roles/download/tasks/main.yml | 2 +- roles/etcd/defaults/main.yml | 2 +- roles/etcd/handlers/backup.yml | 1 - roles/etcd/handlers/main.yml | 1 - roles/etcd/tasks/check_certs.yml | 1 - roles/etcd/tasks/gen_certs_script.yml | 39 ++++----- roles/etcd/tasks/gen_certs_vault.yml | 11 +-- roles/etcd/tasks/install_docker.yml | 34 ++++---- roles/etcd/tasks/pre_upgrade.yml | 1 + roles/etcd/tasks/refresh_config.yml | 2 +- 
roles/etcd/tasks/sync_etcd_master_certs.yml | 4 +- roles/etcd/tasks/sync_etcd_node_certs.yml | 6 +- .../templates/{etcd.env.yml => etcd.env.j2} | 0 roles/kernel-upgrade/defaults/main.yml | 7 +- .../kubernetes-apps/ansible/defaults/main.yml | 3 +- roles/kubernetes-apps/ansible/tasks/main.yml | 4 +- .../ansible/tasks/netchecker.yml | 3 +- .../kubedns-autoscaler-clusterrole.yml | 1 + .../kubedns-autoscaler-clusterrolebinding.yml | 1 + .../templates/kubedns-autoscaler-sa.yml | 1 + ...toscaler.yml => kubedns-autoscaler.yml.j2} | 23 ++--- ...bedns-deploy.yml => kubedns-deploy.yml.j2} | 1 + .../ansible/templates/kubedns-sa.yml | 1 + .../ansible/templates/kubedns-svc.yml | 2 +- .../efk/elasticsearch/defaults/main.yml | 2 +- .../efk/elasticsearch/meta/main.yml | 1 + .../efk/elasticsearch/tasks/main.yml | 1 - .../templates/efk-clusterrolebinding.yml | 1 + .../efk/elasticsearch/templates/efk-sa.yml | 1 + .../efk/fluentd/defaults/main.yml | 2 +- .../kubernetes-apps/efk/fluentd/meta/main.yml | 1 + .../efk/fluentd/tasks/main.yml | 1 - .../efk/kibana/defaults/main.yml | 2 +- .../kubernetes-apps/efk/kibana/meta/main.yml | 1 + .../kubernetes-apps/efk/kibana/tasks/main.yml | 4 +- roles/kubernetes-apps/efk/meta/main.yml | 1 + roles/kubernetes-apps/helm/defaults/main.yml | 1 + roles/kubernetes-apps/helm/meta/main.yml | 1 + .../templates/tiller-clusterrolebinding.yml | 1 + .../helm/templates/tiller-sa.yml | 1 + roles/kubernetes-apps/meta/main.yml | 1 + .../network_plugin/canal/tasks/main.yml | 4 +- .../network_plugin/meta/main.yml | 12 +-- .../network_plugin/weave/tasks/main.yml | 5 +- .../calico/defaults/main.yml | 1 + .../policy_controller/calico/tasks/main.yml | 1 + roles/kubernetes/master/defaults/main.yml | 5 +- roles/kubernetes/master/tasks/main.yml | 1 - roles/kubernetes/node/defaults/main.yml | 3 +- roles/kubernetes/node/tasks/install.yml | 1 - roles/kubernetes/node/tasks/install_rkt.yml | 5 +- roles/kubernetes/preinstall/handlers/main.yml | 1 + 
.../tasks/azure-credential-check.yml | 2 - roles/kubernetes/preinstall/tasks/main.yml | 16 ++-- .../tasks/vsphere-credential-check.yml | 1 + roles/kubernetes/preinstall/vars/centos.yml | 1 + roles/kubernetes/preinstall/vars/debian.yml | 1 + roles/kubernetes/preinstall/vars/fedora.yml | 1 + roles/kubernetes/preinstall/vars/redhat.yml | 1 + .../kubernetes/secrets/tasks/check-certs.yml | 1 - .../secrets/tasks/gen_certs_script.yml | 45 +++++----- .../secrets/tasks/gen_certs_vault.yml | 6 +- .../secrets/tasks/sync_kube_node_certs.yml | 4 +- roles/kubespray-defaults/defaults/main.yaml | 7 +- roles/kubespray-defaults/tasks/main.yaml | 1 + roles/network_plugin/calico/handlers/main.yml | 2 +- .../calico/rr/handlers/main.yml | 2 +- roles/network_plugin/calico/rr/meta/main.yml | 1 + roles/network_plugin/canal/defaults/main.yml | 2 +- roles/network_plugin/cloud/tasks/main.yml | 1 - .../network_plugin/flannel/handlers/main.yml | 2 +- .../flannel/templates/flannel-pod.yml | 86 +++++++++---------- roles/network_plugin/meta/main.yml | 28 +++--- .../weave/tasks/pre-upgrade.yml | 1 + roles/rkt/tasks/install.yml | 14 +-- roles/upgrade/post-upgrade/tasks/main.yml | 2 - roles/upgrade/pre-upgrade/defaults/main.yml | 2 +- roles/vault/defaults/main.yml | 2 +- .../tasks/bootstrap/create_etcd_role.yml | 3 +- .../tasks/bootstrap/start_vault_temp.yml | 3 +- .../tasks/bootstrap/sync_vault_certs.yml | 2 - roles/vault/tasks/cluster/main.yml | 3 +- roles/vault/tasks/shared/auth_backend.yml | 3 +- roles/vault/tasks/shared/check_vault.yml | 5 +- roles/vault/tasks/shared/find_leader.yml | 2 +- roles/vault/tasks/shared/gen_userpass.yml | 2 +- roles/vault/tasks/shared/issue_cert.yml | 2 +- tests/requirements.txt | 5 ++ 106 files changed, 301 insertions(+), 274 deletions(-) create mode 100644 .yamllint rename roles/etcd/templates/{etcd.env.yml => etcd.env.j2} (100%) rename roles/kubernetes-apps/ansible/templates/{kubedns-autoscaler.yml => kubedns-autoscaler.yml.j2} (72%) rename 
roles/kubernetes-apps/ansible/templates/{kubedns-deploy.yml => kubedns-deploy.yml.j2} (99%) create mode 100644 tests/requirements.txt diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 948ef2983..6a456f9df 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,10 +18,7 @@ variables: # us-west1-a before_script: - - pip install ansible==2.3.0 - - pip install netaddr - - pip install apache-libcloud==0.20.1 - - pip install boto==2.9.0 + - pip install -r tests/requirements.txt - mkdir -p /.ssh - cp tests/ansible.cfg . @@ -75,10 +72,7 @@ before_script: - $HOME/.cache before_script: - docker info - - pip install ansible==2.3.0 - - pip install netaddr - - pip install apache-libcloud==0.20.1 - - pip install boto==2.9.0 + - pip install -r tests/requirements.txt - mkdir -p /.ssh - mkdir -p $HOME/.ssh - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa @@ -642,6 +636,13 @@ syntax-check: - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check except: ['triggers', 'master'] +yamllint: + <<: *job + stage: unit-tests + script: + - yamllint roles + except: ['triggers', 'master'] + tox-inventory-builder: stage: unit-tests <<: *job diff --git a/.yamllint b/.yamllint new file mode 100644 index 000000000..50e7b167e --- /dev/null +++ b/.yamllint @@ -0,0 +1,16 @@ +--- +extends: default + +rules: + braces: + min-spaces-inside: 0 + max-spaces-inside: 1 + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 + indentation: + spaces: 2 + indent-sequences: consistent + line-length: disable + new-line-at-end-of-file: disable + truthy: disable diff --git a/roles/bootstrap-os/tasks/bootstrap-coreos.yml b/roles/bootstrap-os/tasks/bootstrap-coreos.yml index 892da1c04..2a2271055 100644 --- a/roles/bootstrap-os/tasks/bootstrap-coreos.yml +++ b/roles/bootstrap-os/tasks/bootstrap-coreos.yml @@ -49,4 +49,3 @@ pip: name: "{{ item }}" with_items: "{{pip_python_modules}}" - diff --git 
a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index 73268031e..e7cb01b13 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -27,4 +27,3 @@ hostname: name: "{{inventory_hostname}}" when: ansible_hostname == 'localhost' - diff --git a/roles/bootstrap-os/tasks/setup-pipelining.yml b/roles/bootstrap-os/tasks/setup-pipelining.yml index 7143f260e..559cef25e 100644 --- a/roles/bootstrap-os/tasks/setup-pipelining.yml +++ b/roles/bootstrap-os/tasks/setup-pipelining.yml @@ -6,4 +6,3 @@ regexp: '^\w+\s+requiretty' dest: /etc/sudoers state: absent - diff --git a/roles/dnsmasq/defaults/main.yml b/roles/dnsmasq/defaults/main.yml index bf670c788..15fb7f169 100644 --- a/roles/dnsmasq/defaults/main.yml +++ b/roles/dnsmasq/defaults/main.yml @@ -4,12 +4,12 @@ # Max of 4 names is allowed and no more than 256 - 17 chars total # (a 2 is reserved for the 'default.svc.' and'svc.') -#searchdomains: -# - foo.bar.lc +# searchdomains: +# - foo.bar.lc # Max of 2 is allowed here (a 1 is reserved for the dns_server) -#nameservers: -# - 127.0.0.1 +# nameservers: +# - 127.0.0.1 dns_forward_max: 150 cache_size: 1000 diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index edc50703d..56ec80d98 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -86,4 +86,3 @@ port: 53 timeout: 180 when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts - diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml index 4e5e2ddcc..aff99f08d 100644 --- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml +++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,17 +35,16 @@ spec: - name: autoscaler image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 resources: - requests: - cpu: "20m" - memory: "10Mi" + requests: + cpu: "20m" + memory: "10Mi" command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=dnsmasq-autoscaler - - --target=Deployment/dnsmasq - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. - - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} - - --logtostderr=true - - --v={{ kube_log_level }} - + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=dnsmasq-autoscaler + - --target=Deployment/dnsmasq + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} + - --logtostderr=true + - --v={{ kube_log_level }} diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index e811e1995..6f11363b3 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -35,7 +35,6 @@ spec: capabilities: add: - NET_ADMIN - imagePullPolicy: IfNotPresent resources: limits: cpu: {{ dns_cpu_limit }} @@ -64,4 +63,3 @@ spec: hostPath: path: /etc/dnsmasq.d-available dnsPolicy: Default # Don't use cluster DNS. 
- diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e262d908a..fa29b32f2 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1,3 +1,4 @@ +--- docker_version: '1.13' docker_package_info: diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml index 90d7aacb8..a43d843ee 100644 --- a/roles/docker/handlers/main.yml +++ b/roles/docker/handlers/main.yml @@ -8,7 +8,7 @@ - Docker | pause while Docker restarts - Docker | wait for docker -- name : Docker | reload systemd +- name: Docker | reload systemd shell: systemctl daemon-reload - name: Docker | reload docker.socket diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 09240bf9d..ef7e7fe8d 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -3,14 +3,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts diff --git a/roles/docker/tasks/set_facts_dns.yml b/roles/docker/tasks/set_facts_dns.yml index 64a09bff2..13f342ea9 100644 --- a/roles/docker/tasks/set_facts_dns.yml +++ b/roles/docker/tasks/set_facts_dns.yml @@ -48,7 +48,7 @@ - name: add system search domains to 
docker options set_fact: docker_dns_search_domains: "{{ docker_dns_search_domains | union(system_search_domains.stdout.split(' ')|default([])) | unique }}" - when: system_search_domains.stdout != "" + when: system_search_domains.stdout != "" - name: check number of nameservers fail: diff --git a/roles/docker/vars/debian.yml b/roles/docker/vars/debian.yml index a4689ffbc..240e86ea4 100644 --- a/roles/docker/vars/debian.yml +++ b/roles/docker/vars/debian.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '3.10' # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist diff --git a/roles/docker/vars/fedora-20.yml b/roles/docker/vars/fedora-20.yml index c74cd9f28..31d431ee8 100644 --- a/roles/docker/vars/fedora-20.yml +++ b/roles/docker/vars/fedora-20.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # versioning: docker-io itself is pinned at docker 1.5 diff --git a/roles/docker/vars/fedora.yml b/roles/docker/vars/fedora.yml index f89c90a52..b82e5fc30 100644 --- a/roles/docker/vars/fedora.yml +++ b/roles/docker/vars/fedora.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package diff --git a/roles/docker/vars/redhat.yml b/roles/docker/vars/redhat.yml index 7abf2cda7..8b20def55 100644 --- a/roles/docker/vars/redhat.yml +++ b/roles/docker/vars/redhat.yml @@ -1,3 +1,4 @@ +--- docker_kernel_min_version: '0' # https://yum.dockerproject.org/repo/main/centos/7/Packages/ @@ -8,7 +9,7 @@ docker_versioned_pkg: '1.12': docker-engine-1.12.6-1.el7.centos '1.13': docker-engine-1.13.1-1.el7.centos 'stable': docker-engine-17.03.0.ce-1.el7.centos - 'edge': docker-engine-17.03.0.ce-1.el7.centos + 'edge': docker-engine-17.03.0.ce-1.el7.centos # https://docs.docker.com/engine/installation/linux/centos/#install-from-a-package # https://download.docker.com/linux/centos/7/x86_64/stable/Packages/ diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 
e5a4aa31b..e5d24072b 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -20,7 +20,7 @@ download_always_pull: False # Versions kube_version: v1.7.3 etcd_version: v3.2.4 -#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults +# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download calico_version: "v1.1.3" calico_cni_version: "v1.8.0" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 24d1b5bca..f9ae253d1 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -111,7 +111,7 @@ - download.enabled|bool - download.container|bool -#NOTE(bogdando) this brings no docker-py deps for nodes +# NOTE(bogdando) this brings no docker-py deps for nodes - name: Download containers if pull is required or told to always pull command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}" register: pull_task_result diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 7d1d976af..6b6fde38d 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -21,7 +21,7 @@ etcd_metrics: "basic" etcd_memory_limit: 512M # Uncomment to set CPU share for etcd -#etcd_cpu_limit: 300m +# etcd_cpu_limit: 300m etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}" diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 68fe71f07..7ec42f4b6 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -43,4 +43,3 @@ ETCDCTL_API: 3 retries: 3 delay: "{{ retry_stagger | random + 3 }}" - diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index 45da999ee..2575c25a4 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -30,4 +30,3 @@ - name: set etcd_secret_changed set_fact: etcd_secret_changed: true - diff --git a/roles/etcd/tasks/check_certs.yml 
b/roles/etcd/tasks/check_certs.yml index fe96ea01c..8795fe820 100644 --- a/roles/etcd/tasks/check_certs.yml +++ b/roles/etcd/tasks/check_certs.yml @@ -66,4 +66,3 @@ {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} - diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index f70c6ee21..000f6842b 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -73,11 +73,10 @@ 'member-{{ node }}-key.pem', {% endfor %}]" my_master_certs: ['ca-key.pem', - 'admin-{{ inventory_hostname }}.pem', - 'admin-{{ inventory_hostname }}-key.pem', - 'member-{{ inventory_hostname }}.pem', - 'member-{{ inventory_hostname }}-key.pem' - ] + 'admin-{{ inventory_hostname }}.pem', + 'admin-{{ inventory_hostname }}-key.pem', + 'member-{{ inventory_hostname }}.pem', + 'member-{{ inventory_hostname }}-key.pem'] all_node_certs: "['ca.pem', {% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %} 'node-{{ node }}.pem', @@ -111,22 +110,22 @@ sync_certs|default(false) and inventory_hostname not in groups['etcd'] notify: set etcd_secret_changed -#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k -#char limit when using shell command - -#FIXME(mattymo): Use tempfile module in ansible 2.3 -- name: Gen_certs | Prepare tempfile for unpacking certs - shell: mktemp /tmp/certsXXXXX.tar.gz - register: cert_tempfile - when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and - inventory_hostname != groups['etcd'][0] +# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k +# char limit when using shell command -- name: Gen_certs | Write master certs to tempfile - copy: - content: "{{etcd_master_cert_data.stdout}}" - dest: "{{cert_tempfile.stdout}}" - owner: root - mode: "0600" +# FIXME(mattymo): Use tempfile module in ansible 2.3 +- name: Gen_certs | Prepare tempfile for unpacking certs + shell: mktemp 
/tmp/certsXXXXX.tar.gz + register: cert_tempfile + when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and + inventory_hostname != groups['etcd'][0] + +- name: Gen_certs | Write master certs to tempfile + copy: + content: "{{etcd_master_cert_data.stdout}}" + dest: "{{cert_tempfile.stdout}}" + owner: root + mode: "0600" when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and inventory_hostname != groups['etcd'][0] diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml index a0bf6cfdc..e59d376e9 100644 --- a/roles/etcd/tasks/gen_certs_vault.yml +++ b/roles/etcd/tasks/gen_certs_vault.yml @@ -7,7 +7,6 @@ when: inventory_hostname in etcd_node_cert_hosts tags: etcd-secrets - - name: gen_certs_vault | Read in the local credentials command: cat /etc/vault/roles/etcd/userpass register: etcd_vault_creds_cat @@ -33,15 +32,15 @@ - name: gen_certs_vault | Set fact for vault_client_token set_fact: - vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" + vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" run_once: true - name: gen_certs_vault | Set fact for Vault API token set_fact: etcd_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ vault_client_token }}" + Accept: application/json + Content-Type: application/json + X-Vault-Token: "{{ vault_client_token }}" run_once: true when: vault_client_token != "" @@ -96,5 +95,3 @@ with_items: "{{ etcd_node_certs_needed|d([]) }}" when: inventory_hostname in etcd_node_cert_hosts notify: set etcd_secret_changed - - diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml index f87caeb4c..76eead2a2 100644 --- a/roles/etcd/tasks/install_docker.yml +++ b/roles/etcd/tasks/install_docker.yml @@ -1,5 +1,5 @@ --- -#Plan A: no docker-py deps +# Plan A: no docker-py deps - name: Install | Copy 
etcdctl binary from docker container command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy; {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} && @@ -12,21 +12,21 @@ delay: "{{ retry_stagger | random + 3 }}" changed_when: false -#Plan B: looks nicer, but requires docker-py on all hosts: -#- name: Install | Set up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: present -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" +# Plan B: looks nicer, but requires docker-py on all hosts: +# - name: Install | Set up etcd-binarycopy container +# docker: +# name: etcd-binarycopy +# state: present +# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" +# when: etcd_deployment_type == "docker" # -#- name: Install | Copy etcdctl from etcd-binarycopy container -# command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl" -# when: etcd_deployment_type == "docker" +# - name: Install | Copy etcdctl from etcd-binarycopy container +# command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl" +# when: etcd_deployment_type == "docker" # -#- name: Install | Clean up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: absent -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" +# - name: Install | Clean up etcd-binarycopy container +# docker: +# name: etcd-binarycopy +# state: absent +# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" +# when: etcd_deployment_type == "docker" diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml index 0f171094a..e86a0d947 100644 --- a/roles/etcd/tasks/pre_upgrade.yml +++ b/roles/etcd/tasks/pre_upgrade.yml @@ -1,3 +1,4 @@ +--- - name: "Pre-upgrade | check for etcd-proxy unit file" stat: path: /etc/systemd/system/etcd-proxy.service diff 
--git a/roles/etcd/tasks/refresh_config.yml b/roles/etcd/tasks/refresh_config.yml index e6f8186d3..0691d1df9 100644 --- a/roles/etcd/tasks/refresh_config.yml +++ b/roles/etcd/tasks/refresh_config.yml @@ -1,7 +1,7 @@ --- - name: Refresh config | Create etcd config file template: - src: etcd.env.yml + src: etcd.env.j2 dest: /etc/etcd.env notify: restart etcd when: is_etcd_master diff --git a/roles/etcd/tasks/sync_etcd_master_certs.yml b/roles/etcd/tasks/sync_etcd_master_certs.yml index 27ce303e9..d436c97f5 100644 --- a/roles/etcd/tasks/sync_etcd_master_certs.yml +++ b/roles/etcd/tasks/sync_etcd_master_certs.yml @@ -1,7 +1,7 @@ --- - name: sync_etcd_master_certs | Create list of master certs needing creation - set_fact: + set_fact: etcd_master_cert_list: >- {{ etcd_master_cert_list|default([]) + [ "admin-" + item + ".pem", @@ -11,7 +11,7 @@ run_once: true - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ groups.etcd }}" diff --git a/roles/etcd/tasks/sync_etcd_node_certs.yml b/roles/etcd/tasks/sync_etcd_node_certs.yml index 2f82dcffd..e535168fc 100644 --- a/roles/etcd/tasks/sync_etcd_node_certs.yml +++ b/roles/etcd/tasks/sync_etcd_node_certs.yml @@ -1,12 +1,12 @@ --- - name: sync_etcd_node_certs | Create list of node certs needing creation - set_fact: + set_fact: etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + item + '.pem'] }}" with_items: "{{ etcd_node_cert_hosts }}" - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ etcd_node_cert_hosts }}" @@ -24,7 +24,7 @@ sync_file_results: [] - include: ../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: ca.pem sync_file_dir: "{{ etcd_cert_dir }}" sync_file_hosts: "{{ etcd_node_cert_hosts }}" diff --git a/roles/etcd/templates/etcd.env.yml b/roles/etcd/templates/etcd.env.j2 similarity index 100% rename 
from roles/etcd/templates/etcd.env.yml rename to roles/etcd/templates/etcd.env.j2 diff --git a/roles/kernel-upgrade/defaults/main.yml b/roles/kernel-upgrade/defaults/main.yml index 8a1116785..688e6e018 100644 --- a/roles/kernel-upgrade/defaults/main.yml +++ b/roles/kernel-upgrade/defaults/main.yml @@ -1,9 +1,8 @@ --- - elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org' -elrepo_rpm : elrepo-release-7.0-3.el7.elrepo.noarch.rpm -elrepo_mirror : http://www.elrepo.org +elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm +elrepo_mirror: http://www.elrepo.org -elrepo_url : '{{elrepo_mirror}}/{{elrepo_rpm}}' +elrepo_url: '{{elrepo_mirror}}/{{elrepo_rpm}}' elrepo_kernel_package: "kernel-lt" diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index d42b2ffed..42c4a027d 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -1,5 +1,6 @@ +--- # Versions -kubedns_version : 1.14.2 +kubedns_version: 1.14.2 kubednsautoscaler_version: 1.1.1 # Limits for dnsmasq/kubedns apps diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index e7bd934de..4f9b6ef1d 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -14,12 +14,12 @@ dest: "{{kube_config_dir}}/{{item.file}}" with_items: - {name: kubedns, file: kubedns-sa.yml, type: sa} - - {name: kubedns, file: kubedns-deploy.yml, type: deployment} + - {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment} - {name: kubedns, file: kubedns-svc.yml, type: svc} - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa} - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole} - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding} - - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: 
deployment} + - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment} register: manifests when: - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index 2d88b288c..ca8535c2a 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -1,3 +1,4 @@ +--- - name: Kubernetes Apps | Lay Down Netchecker Template template: src: "{{item.file}}" @@ -24,7 +25,7 @@ state: absent when: inventory_hostname == groups['kube-master'][0] -#FIXME: remove if kubernetes/features#124 is implemented +# FIXME: remove if kubernetes/features#124 is implemented - name: Kubernetes Apps | Purge old Netchecker daemonsets kube: name: "{{item.item.name}}" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml index a194426c6..f80d3d90c 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrole.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml index a368ae333..eb76f2d4e 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. 
All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml index 9544a7dd9..542ae86ce 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler-sa.yml @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 similarity index 72% rename from roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml rename to roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index 9e0462290..04f93fd84 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -1,3 +1,4 @@ +--- # Copyright 2016 The Kubernetes Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); @@ -34,18 +35,18 @@ spec: - name: autoscaler image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}" resources: - requests: - cpu: "20m" - memory: "10Mi" + requests: + cpu: "20m" + memory: "10Mi" command: - - /cluster-proportional-autoscaler - - --namespace={{ system_namespace }} - - --configmap=kubedns-autoscaler - # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - - --target=Deployment/kube-dns - - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} - - --logtostderr=true - - --v=2 + - /cluster-proportional-autoscaler + - --namespace={{ system_namespace }} + - --configmap=kubedns-autoscaler + # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base + - --target=Deployment/kube-dns + - --default-params={"linear":{"nodesPerReplica":{{ kubedns_nodes_per_replica }},"min":{{ kubedns_min_replicas }}}} + - --logtostderr=true + - --v=2 {% if rbac_enabled %} serviceAccountName: cluster-proportional-autoscaler {% endif %} diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 similarity index 99% rename from roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml rename to roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 index 7e4615676..149a16ebd 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 @@ -1,3 +1,4 @@ +--- apiVersion: extensions/v1beta1 kind: Deployment metadata: diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml index e520ccbfc..f399fd6f4 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-sa.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 
kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml index 0565a01e8..1c4710db1 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml +++ b/roles/kubernetes-apps/ansible/templates/kubedns-svc.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: Service metadata: @@ -19,4 +20,3 @@ spec: - name: dns-tcp port: 53 protocol: TCP - diff --git a/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml b/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml index e5af87425..d38ba6a6b 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/defaults/main.yml @@ -1,5 +1,5 @@ --- -elasticsearch_cpu_limit: 1000m +elasticsearch_cpu_limit: 1000m elasticsearch_mem_limit: 0M elasticsearch_cpu_requests: 100m elasticsearch_mem_requests: 0M diff --git a/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml b/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml index cd0a80606..3dc6f3ca1 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.elasticsearch }}" diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml index 7e3626571..de514b563 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml @@ -38,4 +38,3 @@ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}" run_once: true when: es_service_manifest.changed - diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml index 2c11e566b..a5aba61ae 100644 --- 
a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: diff --git a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml index b73c2a49d..e79e26be8 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/templates/efk-sa.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml index eeb95b71a..e8d93732c 100644 --- a/roles/kubernetes-apps/efk/fluentd/defaults/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/defaults/main.yml @@ -1,5 +1,5 @@ --- -fluentd_cpu_limit: 0m +fluentd_cpu_limit: 0m fluentd_mem_limit: 200Mi fluentd_cpu_requests: 100m fluentd_mem_requests: 200Mi diff --git a/roles/kubernetes-apps/efk/fluentd/meta/main.yml b/roles/kubernetes-apps/efk/fluentd/meta/main.yml index 1ba777c76..0e1e03813 100644 --- a/roles/kubernetes-apps/efk/fluentd/meta/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.fluentd }}" diff --git a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml index 31b41412e..c91bf6827 100644 --- a/roles/kubernetes-apps/efk/fluentd/tasks/main.yml +++ b/roles/kubernetes-apps/efk/fluentd/tasks/main.yml @@ -20,4 +20,3 @@ command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}" run_once: true when: fluentd_ds_manifest.changed - diff --git a/roles/kubernetes-apps/efk/kibana/defaults/main.yml b/roles/kubernetes-apps/efk/kibana/defaults/main.yml index ad6215c93..baf07cdf2 100644 --- 
a/roles/kubernetes-apps/efk/kibana/defaults/main.yml +++ b/roles/kubernetes-apps/efk/kibana/defaults/main.yml @@ -1,5 +1,5 @@ --- -kibana_cpu_limit: 100m +kibana_cpu_limit: 100m kibana_mem_limit: 0M kibana_cpu_requests: 100m kibana_mem_requests: 0M diff --git a/roles/kubernetes-apps/efk/kibana/meta/main.yml b/roles/kubernetes-apps/efk/kibana/meta/main.yml index 34d0ab21a..775880d54 100644 --- a/roles/kubernetes-apps/efk/kibana/meta/main.yml +++ b/roles/kubernetes-apps/efk/kibana/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.kibana }}" diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml index 5e2b15f71..4c14d1945 100644 --- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml +++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: "Kibana | Write Kibana deployment" - template: + template: src: kibana-deployment.yml.j2 dest: "{{ kube_config_dir }}/kibana-deployment.yaml" register: kibana_deployment_manifest @@ -17,7 +17,7 @@ run_once: true - name: "Kibana | Write Kibana service " - template: + template: src: kibana-service.yml.j2 dest: "{{ kube_config_dir }}/kibana-service.yaml" register: kibana_service_manifest diff --git a/roles/kubernetes-apps/efk/meta/main.yml b/roles/kubernetes-apps/efk/meta/main.yml index e11bbae29..550ba9497 100644 --- a/roles/kubernetes-apps/efk/meta/main.yml +++ b/roles/kubernetes-apps/efk/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: kubernetes-apps/efk/elasticsearch - role: kubernetes-apps/efk/fluentd diff --git a/roles/kubernetes-apps/helm/defaults/main.yml b/roles/kubernetes-apps/helm/defaults/main.yml index b1b2dfca9..bb7ca244e 100644 --- a/roles/kubernetes-apps/helm/defaults/main.yml +++ b/roles/kubernetes-apps/helm/defaults/main.yml @@ -1,3 +1,4 @@ +--- helm_enabled: false # specify a dir and attach it to helm for HELM_HOME. 
diff --git a/roles/kubernetes-apps/helm/meta/main.yml b/roles/kubernetes-apps/helm/meta/main.yml index 805439250..5092ec83b 100644 --- a/roles/kubernetes-apps/helm/meta/main.yml +++ b/roles/kubernetes-apps/helm/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.helm }}" diff --git a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml index 0ac9341ee..0c8db4c78 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-clusterrolebinding.yml @@ -1,3 +1,4 @@ +--- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: diff --git a/roles/kubernetes-apps/helm/templates/tiller-sa.yml b/roles/kubernetes-apps/helm/templates/tiller-sa.yml index c840f57f8..26e575fb6 100644 --- a/roles/kubernetes-apps/helm/templates/tiller-sa.yml +++ b/roles/kubernetes-apps/helm/templates/tiller-sa.yml @@ -1,3 +1,4 @@ +--- apiVersion: v1 kind: ServiceAccount metadata: diff --git a/roles/kubernetes-apps/meta/main.yml b/roles/kubernetes-apps/meta/main.yml index c2dd39d73..9652e1a96 100644 --- a/roles/kubernetes-apps/meta/main.yml +++ b/roles/kubernetes-apps/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: download file: "{{ downloads.netcheck_server }}" diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index f5ffc4393..a65a86c43 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create canal ConfigMap run_once: true kube: @@ -7,7 +8,7 @@ resource: "configmap" namespace: "{{system_namespace}}" -#FIXME: remove if kubernetes/features#124 is implemented +# FIXME: remove if kubernetes/features#124 is implemented - name: Purge old flannel and canal-node run_once: true kube: @@ -29,4 +30,3 
@@ namespace: "{{system_namespace}}" state: "{{ item | ternary('latest','present') }}" with_items: "{{ canal_node_manifest.changed }}" - diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml index 43382f2ae..4559d25c6 100644 --- a/roles/kubernetes-apps/network_plugin/meta/main.yml +++ b/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -1,8 +1,8 @@ --- dependencies: - - role: kubernetes-apps/network_plugin/canal - when: kube_network_plugin == 'canal' - tags: canal - - role: kubernetes-apps/network_plugin/weave - when: kube_network_plugin == 'weave' - tags: weave + - role: kubernetes-apps/network_plugin/canal + when: kube_network_plugin == 'canal' + tags: canal + - role: kubernetes-apps/network_plugin/weave + when: kube_network_plugin == 'weave' + tags: weave diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index 232f2d781..c25702b44 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -1,4 +1,5 @@ -#FIXME: remove if kubernetes/features#124 is implemented +--- +# FIXME: remove if kubernetes/features#124 is implemented - name: Weave | Purge old weave daemonset kube: name: "weave-net" @@ -9,7 +10,6 @@ state: absent when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed - - name: Weave | Start Resources kube: name: "weave-net" @@ -21,7 +21,6 @@ with_items: "{{ weave_manifest.changed }}" when: inventory_hostname == groups['kube-master'][0] - - name: "Weave | wait for weave to become available" uri: url: http://127.0.0.1:6784/status diff --git a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml index 7a4db0ea8..93d12c901 100644 --- a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml +++ 
b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Limits for calico apps calico_policy_controller_cpu_limit: 100m calico_policy_controller_memory_limit: 256M diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 8b4271d6a..de102f31d 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -1,3 +1,4 @@ +--- - set_fact: calico_cert_dir: "{{ canal_cert_dir }}" when: kube_network_plugin == 'canal' diff --git a/roles/kubernetes/master/defaults/main.yml b/roles/kubernetes/master/defaults/main.yml index 7cfe9cc9a..979622731 100644 --- a/roles/kubernetes/master/defaults/main.yml +++ b/roles/kubernetes/master/defaults/main.yml @@ -1,3 +1,4 @@ +--- # An experimental dev/test only dynamic volumes provisioner, # for PetSets. Works for kube>=v1.3 only. kube_hostpath_dynamic_provisioner: "false" @@ -52,14 +53,14 @@ kube_oidc_auth: false ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) -#kube_oidc_url: https:// ... +# kube_oidc_url: https:// ... 
# kube_oidc_client_id: kubernetes ## Optional settings for OIDC # kube_oidc_ca_file: {{ kube_cert_dir }}/ca.pem # kube_oidc_username_claim: sub # kube_oidc_groups_claim: groups -##Variables for custom flags +## Variables for custom flags apiserver_custom_flags: [] controller_mgr_custom_flags: [] diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 6922e6a51..24a3a495a 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -88,4 +88,3 @@ - include: post-upgrade.yml tags: k8s-post-upgrade - diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 6e2ff835f..940bdfff4 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Valid options: docker (default), rkt, or host kubelet_deployment_type: host @@ -49,7 +50,7 @@ kube_apiserver_node_port_range: "30000-32767" kubelet_load_modules: false -##Support custom flags to be passed to kubelet +## Support custom flags to be passed to kubelet kubelet_custom_flags: [] # This setting is used for rkt based kubelet for deploying hyperkube diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index ad4cbacf1..692f8247c 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -21,4 +21,3 @@ dest: "/etc/systemd/system/kubelet.service" backup: "yes" notify: restart kubelet - diff --git a/roles/kubernetes/node/tasks/install_rkt.yml b/roles/kubernetes/node/tasks/install_rkt.yml index 68e90860c..d19b099bd 100644 --- a/roles/kubernetes/node/tasks/install_rkt.yml +++ b/roles/kubernetes/node/tasks/install_rkt.yml @@ -20,8 +20,8 @@ path: /var/lib/kubelet - name: Create kubelet service systemd directory - file: - path: /etc/systemd/system/kubelet.service.d + file: + path: /etc/systemd/system/kubelet.service.d state: directory - name: Write kubelet proxy drop-in 
@@ -30,4 +30,3 @@ dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf when: http_proxy is defined or https_proxy is defined or no_proxy is defined notify: restart kubelet - diff --git a/roles/kubernetes/preinstall/handlers/main.yml b/roles/kubernetes/preinstall/handlers/main.yml index 35fec7d94..dab1bf7de 100644 --- a/roles/kubernetes/preinstall/handlers/main.yml +++ b/roles/kubernetes/preinstall/handlers/main.yml @@ -1,3 +1,4 @@ +--- - name: Preinstall | restart network command: /bin/true notify: diff --git a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml index ca50d5843..fa2d82fd2 100644 --- a/roles/kubernetes/preinstall/tasks/azure-credential-check.yml +++ b/roles/kubernetes/preinstall/tasks/azure-credential-check.yml @@ -48,5 +48,3 @@ fail: msg: "azure_route_table_name is missing" when: azure_route_table_name is not defined or azure_route_table_name == "" - - diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index e3f27192f..b6a246684 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -1,6 +1,6 @@ --- - include: pre-upgrade.yml - tags: [upgrade, bootstrap-os] + tags: [upgrade, bootstrap-os] - name: Force binaries directory for Container Linux by CoreOS set_fact: @@ -27,14 +27,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ 
ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts diff --git a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml index b91726d50..9beeb6b50 100644 --- a/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml +++ b/roles/kubernetes/preinstall/tasks/vsphere-credential-check.yml @@ -1,3 +1,4 @@ +--- - name: check vsphere environment variables fail: msg: "{{ item.name }} is missing" diff --git a/roles/kubernetes/preinstall/vars/centos.yml b/roles/kubernetes/preinstall/vars/centos.yml index c1be4b9b3..b2fbcd80a 100644 --- a/roles/kubernetes/preinstall/vars/centos.yml +++ b/roles/kubernetes/preinstall/vars/centos.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git a/roles/kubernetes/preinstall/vars/debian.yml b/roles/kubernetes/preinstall/vars/debian.yml index 596d2ac8b..dfcb0bc34 100644 --- a/roles/kubernetes/preinstall/vars/debian.yml +++ b/roles/kubernetes/preinstall/vars/debian.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - python-apt - aufs-tools diff --git a/roles/kubernetes/preinstall/vars/fedora.yml b/roles/kubernetes/preinstall/vars/fedora.yml index c1be4b9b3..b2fbcd80a 100644 --- a/roles/kubernetes/preinstall/vars/fedora.yml +++ b/roles/kubernetes/preinstall/vars/fedora.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git a/roles/kubernetes/preinstall/vars/redhat.yml b/roles/kubernetes/preinstall/vars/redhat.yml index c1be4b9b3..b2fbcd80a 100644 --- a/roles/kubernetes/preinstall/vars/redhat.yml +++ b/roles/kubernetes/preinstall/vars/redhat.yml @@ -1,3 +1,4 @@ +--- required_pkgs: - libselinux-python - device-mapper-libs diff --git 
a/roles/kubernetes/secrets/tasks/check-certs.yml b/roles/kubernetes/secrets/tasks/check-certs.yml index 69b82d957..3870a3e96 100644 --- a/roles/kubernetes/secrets/tasks/check-certs.yml +++ b/roles/kubernetes/secrets/tasks/check-certs.yml @@ -105,4 +105,3 @@ {%- set _ = certs.update({'sync': True}) -%} {% endif %} {{ certs.sync }} - diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 80fb4a506..41d91362b 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -56,26 +56,25 @@ - set_fact: all_master_certs: "['ca-key.pem', + 'apiserver.pem', + 'apiserver-key.pem', + 'kube-scheduler.pem', + 'kube-scheduler-key.pem', + 'kube-controller-manager.pem', + 'kube-controller-manager-key.pem', + {% for node in groups['kube-master'] %} + 'admin-{{ node }}.pem', + 'admin-{{ node }}-key.pem', + {% endfor %}]" + my_master_certs: ['ca-key.pem', + 'admin-{{ inventory_hostname }}.pem', + 'admin-{{ inventory_hostname }}-key.pem', 'apiserver.pem', 'apiserver-key.pem', 'kube-scheduler.pem', 'kube-scheduler-key.pem', 'kube-controller-manager.pem', - 'kube-controller-manager-key.pem', - {% for node in groups['kube-master'] %} - 'admin-{{ node }}.pem', - 'admin-{{ node }}-key.pem', - {% endfor %}]" - my_master_certs: ['ca-key.pem', - 'admin-{{ inventory_hostname }}.pem', - 'admin-{{ inventory_hostname }}-key.pem', - 'apiserver.pem', - 'apiserver-key.pem', - 'kube-scheduler.pem', - 'kube-scheduler-key.pem', - 'kube-controller-manager.pem', - 'kube-controller-manager-key.pem', - ] + 'kube-controller-manager-key.pem'] all_node_certs: "['ca.pem', {% for node in groups['k8s-cluster'] %} 'node-{{ node }}.pem', @@ -84,11 +83,10 @@ 'kube-proxy-{{ node }}-key.pem', {% endfor %}]" my_node_certs: ['ca.pem', - 'node-{{ inventory_hostname }}.pem', - 'node-{{ inventory_hostname }}-key.pem', - 'kube-proxy-{{ inventory_hostname }}.pem', - 'kube-proxy-{{ 
inventory_hostname }}-key.pem', - ] + 'node-{{ inventory_hostname }}.pem', + 'node-{{ inventory_hostname }}-key.pem', + 'kube-proxy-{{ inventory_hostname }}.pem', + 'kube-proxy-{{ inventory_hostname }}-key.pem'] tags: facts - name: Gen_certs | Gather master certs @@ -114,10 +112,10 @@ sync_certs|default(false) and inventory_hostname != groups['kube-master'][0] -#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k -#char limit when using shell command +# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k +# char limit when using shell command -#FIXME(mattymo): Use tempfile module in ansible 2.3 +# FIXME(mattymo): Use tempfile module in ansible 2.3 - name: Gen_certs | Prepare tempfile for unpacking certs shell: mktemp /tmp/certsXXXXX.tar.gz register: cert_tempfile @@ -195,4 +193,3 @@ - name: Gen_certs | update ca-certificates (RedHat) command: update-ca-trust extract when: kube_ca_cert.changed and ansible_os_family == "RedHat" - diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index e516db0f2..308ac9260 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -33,9 +33,9 @@ - name: gen_certs_vault | Set fact for Vault API token set_fact: kube_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ kube_vault_login_result.get('json',{}).get('auth', {}).get('client_token') }}" + Accept: application/json + Content-Type: application/json + X-Vault-Token: "{{ kube_vault_login_result.get('json',{}).get('auth', {}).get('client_token') }}" run_once: true # Issue certs to kube-master nodes diff --git a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml index b97b85e17..7aafab5c8 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml +++ 
b/roles/kubernetes/secrets/tasks/sync_kube_node_certs.yml @@ -6,7 +6,7 @@ with_items: "{{ groups['k8s-cluster'] }}" - include: ../../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: "{{ item }}" sync_file_dir: "{{ kube_cert_dir }}" sync_file_group: "{{ kube_cert_group }}" @@ -26,7 +26,7 @@ sync_file_results: [] - include: ../../../vault/tasks/shared/sync_file.yml - vars: + vars: sync_file: ca.pem sync_file_dir: "{{ kube_cert_dir }}" sync_file_group: "{{ kube_cert_group }}" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c2152814f..03b05c5bd 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -1,3 +1,4 @@ +--- ## Required for bootstrap-os/preinstall/download roles and setting facts # Valid bootstrap options (required): ubuntu, coreos, centos, none bootstrap_os: none @@ -88,8 +89,10 @@ kube_network_node_prefix: 24 # The port the API Server will be listening on. 
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" -kube_apiserver_port: 6443 # (https) -kube_apiserver_insecure_port: 8080 # (http) +# https +kube_apiserver_port: 6443 +# http +kube_apiserver_insecure_port: 8080 # Path used to store Docker data docker_daemon_graph: "/var/lib/docker" diff --git a/roles/kubespray-defaults/tasks/main.yaml b/roles/kubespray-defaults/tasks/main.yaml index 5b2cb96a0..11b9e3653 100644 --- a/roles/kubespray-defaults/tasks/main.yaml +++ b/roles/kubespray-defaults/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Configure defaults debug: msg: "Check roles/kubespray-defaults/defaults/main.yml" diff --git a/roles/network_plugin/calico/handlers/main.yml b/roles/network_plugin/calico/handlers/main.yml index 78dad7505..05cc73289 100644 --- a/roles/network_plugin/calico/handlers/main.yml +++ b/roles/network_plugin/calico/handlers/main.yml @@ -5,7 +5,7 @@ - Calico | reload systemd - Calico | reload calico-node -- name : Calico | reload systemd +- name: Calico | reload systemd shell: systemctl daemon-reload - name: Calico | reload calico-node diff --git a/roles/network_plugin/calico/rr/handlers/main.yml b/roles/network_plugin/calico/rr/handlers/main.yml index efd0e12ac..cb166bda1 100644 --- a/roles/network_plugin/calico/rr/handlers/main.yml +++ b/roles/network_plugin/calico/rr/handlers/main.yml @@ -5,7 +5,7 @@ - Calico-rr | reload systemd - Calico-rr | reload calico-rr -- name : Calico-rr | reload systemd +- name: Calico-rr | reload systemd shell: systemctl daemon-reload - name: Calico-rr | reload calico-rr diff --git a/roles/network_plugin/calico/rr/meta/main.yml b/roles/network_plugin/calico/rr/meta/main.yml index 55104953e..511b89744 100644 --- a/roles/network_plugin/calico/rr/meta/main.yml +++ b/roles/network_plugin/calico/rr/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: - role: etcd - role: docker diff --git a/roles/network_plugin/canal/defaults/main.yml b/roles/network_plugin/canal/defaults/main.yml 
index d4018db4d..38696b87a 100644 --- a/roles/network_plugin/canal/defaults/main.yml +++ b/roles/network_plugin/canal/defaults/main.yml @@ -1,3 +1,4 @@ +--- # The interface used by canal for host <-> host communication. # If left blank, then the interface is chosing using the node's # default route. @@ -30,4 +31,3 @@ calicoctl_memory_limit: 170M calicoctl_cpu_limit: 100m calicoctl_memory_requests: 32M calicoctl_cpu_requests: 25m - diff --git a/roles/network_plugin/cloud/tasks/main.yml b/roles/network_plugin/cloud/tasks/main.yml index 36fa8e57d..7b6650372 100644 --- a/roles/network_plugin/cloud/tasks/main.yml +++ b/roles/network_plugin/cloud/tasks/main.yml @@ -14,4 +14,3 @@ owner: kube recurse: true mode: "u=rwX,g-rwx,o-rwx" - diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index bd4058976..3726c900e 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -18,7 +18,7 @@ - Flannel | pause while Docker restarts - Flannel | wait for docker -- name : Flannel | reload systemd +- name: Flannel | reload systemd shell: systemctl daemon-reload - name: Flannel | reload docker.socket diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml index 92ecada69..5ca78ae1d 100644 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ b/roles/network_plugin/flannel/templates/flannel-pod.yml @@ -1,44 +1,44 @@ --- - kind: "Pod" - apiVersion: "v1" - metadata: - name: "flannel" - namespace: "{{system_namespace}}" - labels: - app: "flannel" - version: "v0.1" - spec: - volumes: - - name: "subnetenv" - hostPath: - path: "/run/flannel" - - name: "etcd-certs" - hostPath: - path: "{{ flannel_cert_dir }}" - containers: - - name: "flannel-container" - image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" - imagePullPolicy: {{ k8s_image_pull_policy }} - resources: - limits: - cpu: {{ 
flannel_cpu_limit }} - memory: {{ flannel_memory_limit }} - requests: - cpu: {{ flannel_cpu_requests }} - memory: {{ flannel_memory_requests }} - command: - - "/bin/sh" - - "-c" - - "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ flannel_cert_dir }}/ca_cert.crt -etcd-certfile {{ flannel_cert_dir }}/cert.crt -etcd-keyfile {{ flannel_cert_dir }}/key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}" - ports: - - hostPort: 10253 - containerPort: 10253 - volumeMounts: - - name: "subnetenv" - mountPath: "/run/flannel" - - name: "etcd-certs" - mountPath: "{{ flannel_cert_dir }}" - readOnly: true - securityContext: - privileged: true - hostNetwork: true +kind: "Pod" +apiVersion: "v1" +metadata: + name: "flannel" + namespace: "{{system_namespace}}" + labels: + app: "flannel" + version: "v0.1" +spec: + volumes: + - name: "subnetenv" + hostPath: + path: "/run/flannel" + - name: "etcd-certs" + hostPath: + path: "{{ flannel_cert_dir }}" + containers: + - name: "flannel-container" + image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit }} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: + - "/bin/sh" + - "-c" + - "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ flannel_cert_dir }}/ca_cert.crt -etcd-certfile {{ flannel_cert_dir }}/cert.crt -etcd-keyfile {{ flannel_cert_dir }}/key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}" + ports: + - hostPort: 10253 + containerPort: 10253 + volumeMounts: + - name: "subnetenv" + 
mountPath: "/run/flannel" + - name: "etcd-certs" + mountPath: "{{ flannel_cert_dir }}" + readOnly: true + securityContext: + privileged: true + hostNetwork: true diff --git a/roles/network_plugin/meta/main.yml b/roles/network_plugin/meta/main.yml index a1c970efe..d9834a3cd 100644 --- a/roles/network_plugin/meta/main.yml +++ b/roles/network_plugin/meta/main.yml @@ -1,16 +1,16 @@ --- dependencies: - - role: network_plugin/calico - when: kube_network_plugin == 'calico' - tags: calico - - role: network_plugin/flannel - when: kube_network_plugin == 'flannel' - tags: flannel - - role: network_plugin/weave - when: kube_network_plugin == 'weave' - tags: weave - - role: network_plugin/canal - when: kube_network_plugin == 'canal' - tags: canal - - role: network_plugin/cloud - when: kube_network_plugin == 'cloud' + - role: network_plugin/calico + when: kube_network_plugin == 'calico' + tags: calico + - role: network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: flannel + - role: network_plugin/weave + when: kube_network_plugin == 'weave' + tags: weave + - role: network_plugin/canal + when: kube_network_plugin == 'canal' + tags: canal + - role: network_plugin/cloud + when: kube_network_plugin == 'cloud' diff --git a/roles/network_plugin/weave/tasks/pre-upgrade.yml b/roles/network_plugin/weave/tasks/pre-upgrade.yml index 0b10a7551..bcf3c2af2 100644 --- a/roles/network_plugin/weave/tasks/pre-upgrade.yml +++ b/roles/network_plugin/weave/tasks/pre-upgrade.yml @@ -1,3 +1,4 @@ +--- - name: Weave pre-upgrade | Stop legacy weave command: weave stop failed_when: false diff --git a/roles/rkt/tasks/install.yml b/roles/rkt/tasks/install.yml index 76719eebb..0cc8f8898 100644 --- a/roles/rkt/tasks/install.yml +++ b/roles/rkt/tasks/install.yml @@ -3,14 +3,14 @@ include_vars: "{{ item }}" with_first_found: - files: - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}-{{ 
ansible_distribution_release }}.yml" - - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" - - "{{ ansible_distribution|lower }}.yml" - - "{{ ansible_os_family|lower }}.yml" - - defaults.yml + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" + - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" + - "{{ ansible_distribution|lower }}.yml" + - "{{ ansible_os_family|lower }}.yml" + - defaults.yml paths: - - ../vars + - ../vars skip: true tags: facts diff --git a/roles/upgrade/post-upgrade/tasks/main.yml b/roles/upgrade/post-upgrade/tasks/main.yml index e7efa0601..ec6fdcf90 100644 --- a/roles/upgrade/post-upgrade/tasks/main.yml +++ b/roles/upgrade/post-upgrade/tasks/main.yml @@ -1,7 +1,5 @@ --- - - name: Uncordon node command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}" delegate_to: "{{ groups['kube-master'][0] }}" when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} ) - diff --git a/roles/upgrade/pre-upgrade/defaults/main.yml b/roles/upgrade/pre-upgrade/defaults/main.yml index c87b7e9ea..89334f87c 100644 --- a/roles/upgrade/pre-upgrade/defaults/main.yml +++ b/roles/upgrade/pre-upgrade/defaults/main.yml @@ -1,3 +1,3 @@ +--- drain_grace_period: 90 drain_timeout: 120s - diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 7e14374bf..47bb39d44 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -63,7 +63,7 @@ vault_needs_gen: false vault_port: 8200 # Although "cert" is an option, ansible has no way to auth via cert until # upstream merges: https://github.com/ansible/ansible/pull/18141 -vault_role_auth_method: userpass +vault_role_auth_method: userpass vault_roles: - name: etcd group: etcd 
diff --git a/roles/vault/tasks/bootstrap/create_etcd_role.yml b/roles/vault/tasks/bootstrap/create_etcd_role.yml index 57518f944..5e0b88a39 100644 --- a/roles/vault/tasks/bootstrap/create_etcd_role.yml +++ b/roles/vault/tasks/bootstrap/create_etcd_role.yml @@ -1,8 +1,7 @@ --- - - include: ../shared/create_role.yml vars: - create_role_name: "{{ item.name }}" + create_role_name: "{{ item.name }}" create_role_group: "{{ item.group }}" create_role_policy_rules: "{{ item.policy_rules }}" create_role_options: "{{ item.role_options }}" diff --git a/roles/vault/tasks/bootstrap/start_vault_temp.yml b/roles/vault/tasks/bootstrap/start_vault_temp.yml index 4a5e6bc5e..49585a5d9 100644 --- a/roles/vault/tasks/bootstrap/start_vault_temp.yml +++ b/roles/vault/tasks/bootstrap/start_vault_temp.yml @@ -1,5 +1,4 @@ --- - - name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi register: vault_temp_stop_check @@ -13,7 +12,7 @@ -v /etc/vault:/etc/vault {{ vault_image_repo }}:{{ vault_version }} server -#FIXME(mattymo): Crashes on first start with aufs docker storage. See hashicorp/docker-vault#19 +# FIXME(mattymo): Crashes on first start with aufs docker storage. 
See hashicorp/docker-vault#19 - name: bootstrap/start_vault_temp | Start again single node Vault with file backend command: docker start {{ vault_temp_container_name }} diff --git a/roles/vault/tasks/bootstrap/sync_vault_certs.yml b/roles/vault/tasks/bootstrap/sync_vault_certs.yml index ab088753f..9e6eff05c 100644 --- a/roles/vault/tasks/bootstrap/sync_vault_certs.yml +++ b/roles/vault/tasks/bootstrap/sync_vault_certs.yml @@ -1,5 +1,4 @@ --- - - include: ../shared/sync_file.yml vars: sync_file: "ca.pem" @@ -29,4 +28,3 @@ - name: bootstrap/sync_vault_certs | Unset sync_file_results after api.pem sync set_fact: sync_file_results: [] - diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index db97dd078..c21fd0d73 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -1,5 +1,4 @@ --- - - include: ../shared/check_vault.yml when: inventory_hostname in groups.vault @@ -26,7 +25,7 @@ - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault -- include: ../shared/pki_mount.yml +- include: ../shared/pki_mount.yml when: inventory_hostname == groups.vault|first - include: ../shared/config_ca.yml diff --git a/roles/vault/tasks/shared/auth_backend.yml b/roles/vault/tasks/shared/auth_backend.yml index ad5b191c9..82a4c94fb 100644 --- a/roles/vault/tasks/shared/auth_backend.yml +++ b/roles/vault/tasks/shared/auth_backend.yml @@ -1,11 +1,10 @@ --- - - name: shared/auth_backend | Test if the auth backend exists uri: url: "{{ vault_leader_url }}/v1/sys/auth/{{ auth_backend_path }}/tune" headers: "{{ vault_headers }}" validate_certs: false - ignore_errors: true + ignore_errors: true register: vault_auth_backend_check - name: shared/auth_backend | Add the cert auth backend if needed diff --git a/roles/vault/tasks/shared/check_vault.yml b/roles/vault/tasks/shared/check_vault.yml index 257843d95..83328768a 100644 --- a/roles/vault/tasks/shared/check_vault.yml +++ 
b/roles/vault/tasks/shared/check_vault.yml @@ -1,5 +1,4 @@ --- - # Stop temporary Vault if it's running (can linger if playbook fails out) - name: stop vault-temp container shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }} @@ -22,8 +21,8 @@ vault_is_running: "{{ vault_local_service_health|succeeded }}" vault_is_initialized: "{{ vault_local_service_health.get('json', {}).get('initialized', false) }}" vault_is_sealed: "{{ vault_local_service_health.get('json', {}).get('sealed', true) }}" - #vault_in_standby: "{{ vault_local_service_health.get('json', {}).get('standby', true) }}" - #vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}" + # vault_in_standby: "{{ vault_local_service_health.get('json', {}).get('standby', true) }}" + # vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}" - name: check_vault | Set fact about the Vault cluster's initialization state set_fact: diff --git a/roles/vault/tasks/shared/find_leader.yml b/roles/vault/tasks/shared/find_leader.yml index 1aaa8513e..3afee482d 100644 --- a/roles/vault/tasks/shared/find_leader.yml +++ b/roles/vault/tasks/shared/find_leader.yml @@ -15,7 +15,7 @@ vault_leader_url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://{{ item }}:{{ vault_port }}" with_items: "{{ groups.vault }}" when: "hostvars[item]['vault_leader_check'].get('status') in [200,503]" - #run_once: true + # run_once: true - name: find_leader| show vault_leader_url debug: var=vault_leader_url verbosity=2 diff --git a/roles/vault/tasks/shared/gen_userpass.yml b/roles/vault/tasks/shared/gen_userpass.yml index ab3d171b8..4ef301171 100644 --- a/roles/vault/tasks/shared/gen_userpass.yml +++ b/roles/vault/tasks/shared/gen_userpass.yml @@ -22,7 +22,7 @@ - name: shared/gen_userpass | Copy credentials to all hosts in the group copy: content: > - {{ + {{ {'username': gen_userpass_username, 'password': 
gen_userpass_password} | to_nice_json(indent=4) }} diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 4854e8b9e..3b6b6d315 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -26,7 +26,7 @@ - name: issue_cert | Ensure target directory exists file: - path: "{{ issue_cert_path | dirname }}" + path: "{{ issue_cert_path | dirname }}" state: directory group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_dir_mode | d('0755') }}" diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 000000000..77b7f5868 --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,5 @@ +-r ../requirements.txt +yamllint +apache-libcloud==0.20.1 +boto==2.9.0 +tox From 6bb3463e7c743b901ab4bd44643137a85e1e5014 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Thu, 24 Aug 2017 10:04:25 +0100 Subject: [PATCH 07/64] Enable scheduling of critical pods and network plugins on master Added toleration to DNS, netchecker, fluentd, canal, and calico policy. Also small fixes to make yamllint pass. 
--- .../dnsmasq/templates/dnsmasq-autoscaler.yml | 35 ++++++++++--------- roles/dnsmasq/templates/dnsmasq-deploy.yml | 3 ++ .../templates/kubedns-autoscaler.yml.j2 | 6 +++- .../ansible/templates/kubedns-deploy.yml.j2 | 2 ++ .../templates/netchecker-agent-ds.yml.j2 | 3 ++ .../netchecker-agent-hostnet-ds.yml.j2 | 3 ++ .../efk/fluentd/templates/fluentd-ds.yml.j2 | 3 ++ .../templates/calico-policy-controller.yml.j2 | 3 ++ .../canal/templates/canal-node.yml.j2 | 3 ++ .../flannel/templates/flannel-pod.yml | 3 ++ .../weave/templates/weave-net.yml.j2 | 2 +- 11 files changed, 48 insertions(+), 18 deletions(-) diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml index aff99f08d..85b357950 100644 --- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml +++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml @@ -31,20 +31,23 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - - name: autoscaler - image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=dnsmasq-autoscaler - - --target=Deployment/dnsmasq - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. 
- - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} - - --logtostderr=true - - --v={{ kube_log_level }} + - name: autoscaler + image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1 + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=dnsmasq-autoscaler + - --target=Deployment/dnsmasq + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}} + - --logtostderr=true + - --v={{ kube_log_level }} diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index 6f11363b3..94b15206b 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -21,6 +21,9 @@ spec: kubernetes.io/cluster-service: "true" kubespray/dnsmasq-checksum: "{{ dnsmasq_stat.stat.checksum }}" spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: dnsmasq image: "{{ dnsmasq_image_repo }}:{{ dnsmasq_image_tag }}" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index 04f93fd84..fb87d5a50 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -29,11 +29,15 @@ spec: k8s-app: kubedns-autoscaler annotations: scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: containers: - name: autoscaler image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}" + tolerations: + - effect: NoSchedule + operator: 
Exists + - effect: CriticalAddonsOnly + operator: exists resources: requests: cpu: "20m" diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 index 149a16ebd..682bdf491 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-deploy.yml.j2 @@ -30,6 +30,8 @@ spec: tolerations: - key: "CriticalAddonsOnly" operator: "Exists" + - effect: NoSchedule + operator: Exists volumes: - name: kube-dns-config configMap: diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index df0b8ba90..8b16e0c30 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -12,6 +12,9 @@ spec: labels: app: netchecker-agent spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: netchecker-agent image: "{{ agent_img }}" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index 10a74da84..6064d8e68 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -16,6 +16,9 @@ spec: {% if kube_version | version_compare('v1.6', '>=') %} dnsPolicy: ClusterFirstWithHostNet {% endif %} + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: netchecker-agent image: "{{ agent_img }}" diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 index 77ed3c4ff..838ebf1e6 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 @@ -17,6 
+17,9 @@ spec: kubernetes.io/cluster-service: "true" version: "v{{ fluentd_version }}" spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: fluentd-es image: "{{ fluentd_image_repo }}:{{ fluentd_image_tag }}" diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 index 322d3a37b..4722cbc53 100644 --- a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 @@ -21,6 +21,9 @@ spec: k8s-app: calico-policy spec: hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: calico-policy-controller image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }} diff --git a/roles/network_plugin/canal/templates/canal-node.yml.j2 b/roles/network_plugin/canal/templates/canal-node.yml.j2 index 37baf06e0..b749d4d32 100644 --- a/roles/network_plugin/canal/templates/canal-node.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yml.j2 @@ -18,6 +18,9 @@ spec: k8s-app: canal-node spec: hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists volumes: # Used by calico/node. 
- name: lib-modules diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml index 5ca78ae1d..a6e075b8c 100644 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ b/roles/network_plugin/flannel/templates/flannel-pod.yml @@ -8,6 +8,9 @@ metadata: app: "flannel" version: "v0.1" spec: + tolerations: + - effect: NoSchedule + operator: Exists volumes: - name: "subnetenv" hostPath: diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index ba1f07929..691b4cf02 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -153,4 +153,4 @@ items: path: /var/lib/dbus - name: lib-modules hostPath: - path: /lib/modules \ No newline at end of file + path: /lib/modules From c22cfa255ba47706288a469a1e9e2e5091674f23 Mon Sep 17 00:00:00 2001 From: Mohamed Mehany <7327188+mohamed-mehany@users.noreply.github.com> Date: Thu, 24 Aug 2017 17:00:45 +0300 Subject: [PATCH 08/64] Added private key file to ssh bastion conf (#1563) * Added private key file to ssh bastion conf * Used regular if condition insted of inline conditional --- roles/bastion-ssh-config/templates/ssh-bastion.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion-ssh-config/templates/ssh-bastion.conf b/roles/bastion-ssh-config/templates/ssh-bastion.conf index ebb380665..a6a5bc592 100644 --- a/roles/bastion-ssh-config/templates/ssh-bastion.conf +++ b/roles/bastion-ssh-config/templates/ssh-bastion.conf @@ -16,6 +16,6 @@ Host {{ bastion_ip }} ControlPersist 5m Host {{ vars['hosts'] }} - ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} + ProxyCommand ssh -W %h:%p {{ real_user }}@{{ bastion_ip }} {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %} StrictHostKeyChecking no {% endif %} From 
3aabba753511f05660ea32e0bfb934941578307d Mon Sep 17 00:00:00 2001 From: Xavier Mehrenberger Date: Thu, 24 Aug 2017 16:01:30 +0200 Subject: [PATCH 09/64] Remove discontinued option --reconcile-cidr if kube_network_plugin=="cloud" (#1568) --- roles/kubernetes/node/templates/kubelet.j2 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index cb5935178..6abea5db5 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -61,8 +61,7 @@ KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock" {% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %} -# Please note that --reconcile-cidr is deprecated and a no-op in Kubernetes 1.5 but still required in 1.4 -KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet --reconcile-cidr=true" +KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet" {% endif %} # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow-privileged=true" From a98b866a66fccfa017cc83301358d5f5e88636eb Mon Sep 17 00:00:00 2001 From: Yuki KIRII Date: Thu, 24 Aug 2017 23:47:32 +0900 Subject: [PATCH 10/64] Verify if br_netfilter module exists (#1492) --- roles/network_plugin/weave/tasks/main.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index 813bbfafe..bd6691859 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -4,10 +4,17 @@ - include: seed.yml when: weave_mode_seed -- name: Weave | enable br_netfilter module +- name: Weave | Verify if br_netfilter module exists 
+ shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + +- name: Weave | Enable br_netfilter module modprobe: name: br_netfilter state: present + when: modinfo_br_netfilter.rc == 0 - name: Weave | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/" From 327f9baccff421273868a797ebd766f769434a31 Mon Sep 17 00:00:00 2001 From: Hans Kristian Flaatten Date: Thu, 24 Aug 2017 20:36:53 +0200 Subject: [PATCH 11/64] Update supported component versions in README.md (#1555) --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index bb49cd041..08f853f54 100644 --- a/README.md +++ b/README.md @@ -53,13 +53,13 @@ Versions of supported components -------------------------------- -[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.6.7
-[etcd](https://github.com/coreos/etcd/releases) v3.0.17
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3
+[etcd](https://github.com/coreos/etcd/releases) v3.2.4
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v1.1.3
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
[weave](http://weave.works/) v2.0.1
-[docker](https://www.docker.com/) v1.13.1 (see note)
+[docker](https://www.docker.com/) v1.13 (see note)
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin). From 01ce09f3433dab5a59fa807cc59cfb3928b6d08c Mon Sep 17 00:00:00 2001 From: Hassan Zamani Date: Fri, 25 Aug 2017 00:48:38 +0430 Subject: [PATCH 12/64] Add feature_gates var for customizing Kubernetes feature gates (#1520) --- docs/vars.md | 2 ++ .../master/templates/manifests/kube-apiserver.manifest.j2 | 3 +++ .../templates/manifests/kube-controller-manager.manifest.j2 | 3 +++ .../master/templates/manifests/kube-scheduler.manifest.j2 | 3 +++ roles/kubernetes/node/templates/kubelet.j2 | 2 +- roles/kubespray-defaults/defaults/main.yaml | 4 ++++ 6 files changed, 16 insertions(+), 1 deletion(-) diff --git a/docs/vars.md b/docs/vars.md index f50197832..b2b66d3c3 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -67,6 +67,8 @@ following default cluster paramters: OpenStack (default is unset) * *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in Kubernetes +* *kube_feature_gates* - A list of key=value pairs that describe feature gates for + alpha/experimental Kubernetes features. (defaults is `[]`) * *authorization_modes* - A list of [authorization mode]( https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module) that the cluster should be configured for. Defaults to `[]` (i.e. no authorization). 
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 24094fefb..c19076db3 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -84,6 +84,9 @@ spec: {% if authorization_modes %} - --authorization-mode={{ authorization_modes|join(',') }} {% endif %} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} +{% endif %} {% if apiserver_custom_flags is string %} - {{ apiserver_custom_flags }} {% else %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index a6b69fa14..406994286 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -49,6 +49,9 @@ spec: - --configure-cloud-routes=true - --cluster-cidr={{ kube_pods_subnet }} {% endif %} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} +{% endif %} {% if controller_mgr_custom_flags is string %} - {{ controller_mgr_custom_flags }} {% else %} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index fdc16bf7f..054239b67 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -27,6 +27,9 @@ spec: - --leader-elect=true - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml - --v={{ kube_log_level }} +{% if kube_feature_gates %} + - --feature-gates={{ kube_feature_gates|join(',') }} +{% endif %} {% if scheduler_custom_flags is string %} - {{ scheduler_custom_flags }} {% else %} 
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 6abea5db5..ce83dea48 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -55,7 +55,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %} {% endif %} -KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 03b05c5bd..5405e2577 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -131,3 +131,7 @@ openstack_lbaas_monitor_max_retries: false ## 'RBAC' modes are tested. authorization_modes: [] rbac_enabled: "{{ 'RBAC' in authorization_modes }}" + +## List of key=value pairs that describe feature gates for +## the k8s cluster. 
+kube_feature_gates: [] From 4550dccb845df9eb8d7d1a0b942c4499bc9b0aab Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Thu, 24 Aug 2017 15:21:39 -0500 Subject: [PATCH 13/64] Fixing reference to vault leader url (#1569) --- roles/vault/tasks/bootstrap/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index 98904bbe7..83167ace7 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -22,6 +22,12 @@ inventory_hostname == groups.vault|first and not vault_cluster_is_initialized +# Set vault_leader_url for all nodes based on above +- name: vault | bootstrap + set_fact: + vault_leader_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + when: not vault_cluster_is_initialized + # NOTE: The next 2 steps run against temp Vault and long-term Vault # Ensure PKI mount exists From a39e78d42d5bcb6893d0981fc478a3883364fdae Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Fri, 25 Aug 2017 02:07:50 -0500 Subject: [PATCH 14/64] Initial version of Flannel using CNI (#1486) * Updates Controller Manager/Kubelet with Flannel's required configuration for CNI * Removes old Flannel installation * Install CNI enabled Flannel DaemonSet/ConfigMap/CNI bins and config (with portmap plugin) on host * Uses RBAC if enabled * Fixed an issue that could occur if br_netfilter is not a module and net.bridge.bridge-nf-call-iptables sysctl was not set --- docs/flannel.md | 7 - roles/docker/templates/docker-options.conf.j2 | 2 +- roles/download/defaults/main.yml | 11 +- .../network_plugin/flannel/tasks/main.yml | 22 +++ .../network_plugin/meta/main.yml | 3 + .../kube-controller-manager.manifest.j2 | 5 +- roles/kubernetes/node/templates/kubelet.j2 | 2 +- .../network_plugin/flannel/defaults/main.yml | 7 +- .../network_plugin/flannel/handlers/main.yml | 4 + roles/network_plugin/flannel/meta/main.yml | 3 + roles/network_plugin/flannel/tasks/main.yml | 114 ++++++---------- 
.../flannel/tasks/pre-upgrade.yml | 19 +++ .../flannel/templates/cni-flannel-rbac.yml.j2 | 44 ++++++ .../flannel/templates/cni-flannel.yml.j2 | 125 ++++++++++++++++++ .../flannel/templates/flannel-options.conf.j2 | 6 - .../flannel/templates/flannel-pod.yml | 47 ------- 16 files changed, 279 insertions(+), 142 deletions(-) create mode 100644 roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml create mode 100644 roles/network_plugin/flannel/tasks/pre-upgrade.yml create mode 100644 roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 create mode 100644 roles/network_plugin/flannel/templates/cni-flannel.yml.j2 delete mode 100644 roles/network_plugin/flannel/templates/flannel-options.conf.j2 delete mode 100644 roles/network_plugin/flannel/templates/flannel-pod.yml diff --git a/docs/flannel.md b/docs/flannel.md index 307eab56c..06351538d 100644 --- a/docs/flannel.md +++ b/docs/flannel.md @@ -23,13 +23,6 @@ ip a show dev flannel.1 valid_lft forever preferred_lft forever ``` -* Docker must be configured with a bridge ip in the flannel subnet. - -``` -ps aux | grep docker -root 20196 1.7 2.7 1260616 56840 ? 
Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450 -``` - * Try to run a container and check its ip address ``` diff --git a/roles/docker/templates/docker-options.conf.j2 b/roles/docker/templates/docker-options.conf.j2 index 3f54c853d..c70f3d89f 100644 --- a/roles/docker/templates/docker-options.conf.j2 +++ b/roles/docker/templates/docker-options.conf.j2 @@ -1,3 +1,3 @@ [Service] Environment="DOCKER_OPTS={{ docker_options | default('') }} \ ---iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}" +--iptables=false" diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 9b78044e1..21599d986 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -27,7 +27,8 @@ calico_ctl_version: "v1.4.0" calico_cni_version: "v1.10.0" calico_policy_version: "v0.7.0" weave_version: 2.0.1 -flannel_version: v0.8.0 +flannel_version: "v0.8.0" +flannel_cni_version: "v0.2.0" pod_infra_version: 3.0 # Download URL's @@ -43,6 +44,8 @@ etcd_image_repo: "quay.io/coreos/etcd" etcd_image_tag: "{{ etcd_version }}" flannel_image_repo: "quay.io/coreos/flannel" flannel_image_tag: "{{ flannel_version }}" +flannel_cni_image_repo: "quay.io/coreos/flannel-cni" +flannel_cni_image_tag: "{{ flannel_cni_version }}" calicoctl_image_repo: "quay.io/calico/ctl" calicoctl_image_tag: "{{ calico_ctl_version }}" calico_node_image_repo: "quay.io/calico/node" @@ -138,6 +141,12 @@ downloads: tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" + flannel_cni: + container: true + repo: "{{ flannel_cni_image_repo }}" + tag: "{{ flannel_cni_image_tag }}" + sha256: "{{ flannel_cni_digest_checksum|default(None) }}" + enabled: "{{ kube_network_plugin == 'flannel' }}" calicoctl: container: true repo: "{{ calicoctl_image_repo }}" diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml 
b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml new file mode 100644 index 000000000..cfe931375 --- /dev/null +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml" + run_once: true + when: rbac_enabled and flannel_rbac_manifest.changed + +- name: Flannel | Start Resources + kube: + name: "kube-flannel" + kubectl: "{{ bin_dir }}/kubectl" + filename: "{{ kube_config_dir }}/cni-flannel.yml" + resource: "ds" + namespace: "{{system_namespace}}" + state: "{{ item | ternary('latest','present') }}" + with_items: "{{ flannel_manifest.changed }}" + when: inventory_hostname == groups['kube-master'][0] + +- name: Flannel | Wait for flannel subnet.env file presence + wait_for: + path: /run/flannel/subnet.env + delay: 5 + timeout: 600 \ No newline at end of file diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml index 4559d25c6..18c786c1d 100644 --- a/roles/kubernetes-apps/network_plugin/meta/main.yml +++ b/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -3,6 +3,9 @@ dependencies: - role: kubernetes-apps/network_plugin/canal when: kube_network_plugin == 'canal' tags: canal + - role: kubernetes-apps/network_plugin/flannel + when: kube_network_plugin == 'flannel' + tags: flannel - role: kubernetes-apps/network_plugin/weave when: kube_network_plugin == 'weave' tags: weave diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 406994286..44a1c253c 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -45,9 +45,12 @@ spec: - 
--cloud-provider={{cloud_provider}} {% endif %} {% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %} - - --allocate-node-cidrs=true - --configure-cloud-routes=true +{% endif %} +{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel"] %} + - --allocate-node-cidrs=true - --cluster-cidr={{ kube_pods_subnet }} + - --service-cluster-ip-range={{ kube_service_addresses }} {% endif %} {% if kube_feature_gates %} - --feature-gates={{ kube_feature_gates|join(',') }} diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index ce83dea48..14d7e0ddd 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -56,7 +56,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}" -{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %} +{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave"] %} KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %} DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock" diff --git a/roles/network_plugin/flannel/defaults/main.yml b/roles/network_plugin/flannel/defaults/main.yml index 360756f0a..08f4ac145 100644 --- a/roles/network_plugin/flannel/defaults/main.yml +++ b/roles/network_plugin/flannel/defaults/main.yml @@ -1,7 +1,8 @@ --- # Flannel public IP # The address that flannel should advertise as how to access 
the system -flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" +# Disabled until https://github.com/coreos/flannel/issues/712 is fixed +# flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}" ## interface that should be used for flannel operations ## This is actually an inventory node-level item @@ -17,5 +18,5 @@ flannel_cpu_limit: 300m flannel_memory_requests: 64M flannel_cpu_requests: 150m -flannel_cert_dir: /etc/flannel/certs -etcd_cert_dir: /etc/ssl/etcd/ssl +# Legacy directory, will be removed if found. +flannel_cert_dir: /etc/flannel/certs \ No newline at end of file diff --git a/roles/network_plugin/flannel/handlers/main.yml b/roles/network_plugin/flannel/handlers/main.yml index 3726c900e..00c5667b0 100644 --- a/roles/network_plugin/flannel/handlers/main.yml +++ b/roles/network_plugin/flannel/handlers/main.yml @@ -4,6 +4,10 @@ failed_when: false notify: Flannel | restart docker +- name: Flannel | delete flannel interface + command: ip link delete flannel.1 + failed_when: false + # special cases for atomic because it defaults to live-restore: true # So we disable live-restore to pickup the new flannel IP. 
After # we enable it, we have to restart docker again to pickup the new diff --git a/roles/network_plugin/flannel/meta/main.yml b/roles/network_plugin/flannel/meta/main.yml index 4a2caef72..791209357 100644 --- a/roles/network_plugin/flannel/meta/main.yml +++ b/roles/network_plugin/flannel/meta/main.yml @@ -3,3 +3,6 @@ dependencies: - role: download file: "{{ downloads.flannel }}" tags: download + - role: download + file: "{{ downloads.flannel_cni }}" + tags: download diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 573b51f19..037684fa4 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ b/roles/network_plugin/flannel/tasks/main.yml @@ -1,83 +1,47 @@ --- -- name: Flannel | Set Flannel etcd configuration - command: |- - {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \ - set /{{ cluster_name }}/network/config \ - '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }' - delegate_to: "{{groups['etcd'][0]}}" - run_once: true +- include: pre-upgrade.yml -- name: Flannel | Create flannel certs directory - file: - dest: "{{ flannel_cert_dir }}" - state: directory - mode: 0750 - owner: root - group: root +- name: Flannel | Verify if br_netfilter module exists + shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false -- name: Flannel | Link etcd certificates for flanneld - file: - src: "{{ etcd_cert_dir }}/{{ item.s }}" - dest: "{{ flannel_cert_dir }}/{{ item.d }}" - state: hard - force: yes +- name: Flannel | Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module +- name: Flannel | Check if bridge-nf-call-iptables key exists + command: "sysctl 
net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + register: sysctl_bridge_nf_call_iptables + +- name: Flannel | Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + value: 1 + reload: yes + when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 with_items: - - {s: "ca.pem", d: "ca_cert.crt"} - - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"} - - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"} + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables -- name: Flannel | Create flannel pod manifest +- name: Flannel | Create cni-flannel-rbac manifest template: - src: flannel-pod.yml - dest: "{{kube_manifest_dir}}/flannel-pod.manifest" - notify: Flannel | delete default docker bridge + src: cni-flannel-rbac.yml.j2 + dest: "{{ kube_config_dir }}/cni-flannel-rbac.yml" + register: flannel_rbac_manifest + when: inventory_hostname == groups['kube-master'][0] and rbac_enabled -- name: Flannel | Wait for flannel subnet.env file presence - wait_for: - path: /run/flannel/subnet.env - delay: 5 - timeout: 600 - -- name: Flannel | Get flannel_subnet from subnet.env - shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}' - register: flannel_subnet_output - changed_when: false - check_mode: no - -- set_fact: - flannel_subnet: "{{ flannel_subnet_output.stdout }}" - -- name: Flannel | Get flannel_mtu from subnet.env - shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}' - register: flannel_mtu_output - changed_when: false - check_mode: no - -- set_fact: - flannel_mtu: "{{ flannel_mtu_output.stdout }}" - -- set_fact: - docker_options_file: >- - {%- if ansible_os_family == "Debian" -%}/etc/default/docker{%- elif ansible_os_family == "RedHat" -%}/etc/sysconfig/docker{%- endif -%} - tags: facts - -- set_fact: - docker_options_name: >- - {%- if ansible_os_family == "Debian" 
-%}DOCKER_OPTS{%- elif ansible_os_family == "RedHat" -%}other_args{%- endif -%} - tags: facts - -- set_fact: - docker_network_options: '"--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"' - -- name: Flannel | Ensure path for docker network systemd drop-in - file: - path: "/etc/systemd/system/docker.service.d" - state: directory - owner: root - -- name: Flannel | Create docker network systemd drop-in +- name: Flannel | Create cni-flannel manifest template: - src: flannel-options.conf.j2 - dest: "/etc/systemd/system/docker.service.d/flannel-options.conf" - notify: - - Flannel | restart docker + src: cni-flannel.yml.j2 + dest: "{{ kube_config_dir }}/cni-flannel.yml" + register: flannel_manifest + when: inventory_hostname == groups['kube-master'][0] \ No newline at end of file diff --git a/roles/network_plugin/flannel/tasks/pre-upgrade.yml b/roles/network_plugin/flannel/tasks/pre-upgrade.yml new file mode 100644 index 000000000..6b6fcd54f --- /dev/null +++ b/roles/network_plugin/flannel/tasks/pre-upgrade.yml @@ -0,0 +1,19 @@ +--- +- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file + file: + path: "/etc/systemd/system/docker.service.d/flannel-options.conf" + state: absent + notify: + - Flannel | delete default docker bridge + +- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest + file: + path: "{{ kube_manifest_dir }}/flannel-pod.manifest" + state: absent + notify: + - Flannel | delete flannel interface + +- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI + file: + dest: "{{ flannel_cert_dir }}" + state: absent \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 new file mode 100644 index 000000000..aafe2a0f5 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel-rbac.yml.j2 @@ -0,0 +1,44 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: flannel + 
namespace: "{{system_namespace}}" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: "{{system_namespace}}" \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 new file mode 100644 index 000000000..0012228d7 --- /dev/null +++ b/roles/network_plugin/flannel/templates/cni-flannel.yml.j2 @@ -0,0 +1,125 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: "{{system_namespace}}" + labels: + tier: node + app: flannel +data: + cni-conf.json: | + { + "name":"cbr0", + "cniVersion":"0.3.1", + "plugins":[ + { + "type":"flannel", + "delegate":{ + "forceAddress":true, + "isDefaultGateway":true + } + }, + { + "type":"portmap", + "capabilities":{ + "portMappings":true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ kube_pods_subnet }}", + "Backend": { + "Type": "{{ flannel_backend_type }}" + } + } +--- +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: kube-flannel + namespace: "{{system_namespace}}" + labels: + tier: node + k8s-app: flannel +spec: + template: + metadata: + labels: + tier: node + k8s-app: flannel + spec: +{% if rbac_enabled %} + serviceAccountName: flannel +{% endif %} + containers: + - name: kube-flannel + image: {{ flannel_image_repo }}:{{ flannel_image_tag }} + imagePullPolicy: {{ k8s_image_pull_policy }} + resources: + limits: + cpu: {{ flannel_cpu_limit }} + memory: {{ flannel_memory_limit 
}} + requests: + cpu: {{ flannel_cpu_requests }} + memory: {{ flannel_memory_requests }} + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ] + securityContext: + privileged: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + volumeMounts: + - name: run + mountPath: /run + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ + - name: install-cni + image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }} + command: ["/install-cni.sh"] + env: + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: kube-flannel-cfg + key: cni-conf.json + - name: CNI_CONF_NAME + value: "10-flannel.conflist" + volumeMounts: + - name: cni + mountPath: /host/etc/cni/net.d + - name: host-cni-bin + mountPath: /host/opt/cni/bin/ + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + volumes: + - name: run + hostPath: + path: /run + - name: cni + hostPath: + path: /etc/cni/net.d + - name: flannel-cfg + configMap: + name: kube-flannel-cfg + - name: host-cni-bin + hostPath: + path: /opt/cni/bin + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate \ No newline at end of file diff --git a/roles/network_plugin/flannel/templates/flannel-options.conf.j2 b/roles/network_plugin/flannel/templates/flannel-options.conf.j2 deleted file mode 100644 index 9ee22b4bc..000000000 --- a/roles/network_plugin/flannel/templates/flannel-options.conf.j2 +++ /dev/null @@ -1,6 +0,0 @@ -[Service] -{% if ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] %} -Environment="DOCKER_OPT_BIP=--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}" -{% else %} -Environment="DOCKER_NETWORK_OPTIONS=--bip={{ 
flannel_subnet }} --mtu={{ flannel_mtu }}" -{% endif %} diff --git a/roles/network_plugin/flannel/templates/flannel-pod.yml b/roles/network_plugin/flannel/templates/flannel-pod.yml deleted file mode 100644 index a6e075b8c..000000000 --- a/roles/network_plugin/flannel/templates/flannel-pod.yml +++ /dev/null @@ -1,47 +0,0 @@ ---- -kind: "Pod" -apiVersion: "v1" -metadata: - name: "flannel" - namespace: "{{system_namespace}}" - labels: - app: "flannel" - version: "v0.1" -spec: - tolerations: - - effect: NoSchedule - operator: Exists - volumes: - - name: "subnetenv" - hostPath: - path: "/run/flannel" - - name: "etcd-certs" - hostPath: - path: "{{ flannel_cert_dir }}" - containers: - - name: "flannel-container" - image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}" - imagePullPolicy: {{ k8s_image_pull_policy }} - resources: - limits: - cpu: {{ flannel_cpu_limit }} - memory: {{ flannel_memory_limit }} - requests: - cpu: {{ flannel_cpu_requests }} - memory: {{ flannel_memory_requests }} - command: - - "/bin/sh" - - "-c" - - "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ flannel_cert_dir }}/ca_cert.crt -etcd-certfile {{ flannel_cert_dir }}/cert.crt -etcd-keyfile {{ flannel_cert_dir }}/key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}" - ports: - - hostPort: 10253 - containerPort: 10253 - volumeMounts: - - name: "subnetenv" - mountPath: "/run/flannel" - - name: "etcd-certs" - mountPath: "{{ flannel_cert_dir }}" - readOnly: true - securityContext: - privileged: true - hostNetwork: true From 76b72338daf5ce72e7a484b9af20ca2c063cdb78 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 28 Aug 2017 21:11:01 +0300 Subject: [PATCH 15/64] Add CNI config for rkt kubelet (#1579) --- roles/kubernetes/node/templates/kubelet.rkt.service.j2 | 2 +- roles/kubernetes/preinstall/tasks/main.yml 
| 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 1f181a89d..592d70c2b 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -32,7 +32,7 @@ ExecStart=/usr/bin/rkt run \ --volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \ --volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \ --volume var-log,kind=host,source=/var/log \ -{% if kube_network_plugin in ["calico", "weave", "canal"] %} +{% if kube_network_plugin in ["calico", "weave", "canal", "flannel"] %} --volume etc-cni,kind=host,source=/etc/cni,readOnly=true \ --volume opt-cni,kind=host,source=/opt/cni,readOnly=true \ --volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \ diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index b6a246684..65716816e 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -85,7 +85,7 @@ - "/etc/cni/net.d" - "/opt/cni/bin" when: - - kube_network_plugin in ["calico", "weave", "canal"] + - kube_network_plugin in ["calico", "weave", "canal", "flannel"] - inventory_hostname in groups['k8s-cluster'] tags: [network, calico, weave, canal, bootstrap-os] From 6c30a7b2eb0a9a474d04122b8e134f0f1c479131 Mon Sep 17 00:00:00 2001 From: Eric Hoffmann <2ffs2nns@gmail.com> Date: Mon, 28 Aug 2017 13:23:29 -0700 Subject: [PATCH 16/64] update calico version update calico releases link --- README.md | 2 +- roles/download/defaults/main.yml | 4 ++-- roles/uploads/defaults/main.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 08f853f54..641f783d7 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ Versions of supported components 
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.7.3
[etcd](https://github.com/coreos/etcd/releases) v3.2.4
[flanneld](https://github.com/coreos/flannel/releases) v0.8.0
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v1.1.3
+[calico](https://docs.projectcalico.org/v2.5/releases/) v2.5.0
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
[weave](http://weave.works/) v2.0.1
[docker](https://www.docker.com/) v1.13 (see note)
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 21599d986..25a335915 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -22,8 +22,8 @@ kube_version: v1.7.3 etcd_version: v3.2.4 # TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults # after migration to container download -calico_version: "v2.4.1" -calico_ctl_version: "v1.4.0" +calico_version: "v2.5.0" +calico_ctl_version: "v1.5.0" calico_cni_version: "v1.10.0" calico_policy_version: "v0.7.0" weave_version: 2.0.1 diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml index bc0a17f32..587c0f043 100644 --- a/roles/uploads/defaults/main.yml +++ b/roles/uploads/defaults/main.yml @@ -3,7 +3,7 @@ local_release_dir: /tmp # Versions etcd_version: v3.0.17 -calico_version: v2.4.1 +calico_version: v2.5.0 calico_cni_version: v1.10.0 weave_version: v2.0.1 From 13d08af0540ef6a7c5d8dc6e731a543cdb82376c Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 29 Aug 2017 19:35:27 +0100 Subject: [PATCH 17/64] Fix upgrade for canal and apiserver cert Fixes #1573 --- .../network_plugin/canal/tasks/main.yml | 12 ------------ roles/kubernetes/secrets/files/make-ssl.sh | 9 ++++++--- .../network_plugin/canal/templates/canal-node.yml.j2 | 5 +++++ 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index a65a86c43..72956dac9 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -8,18 +8,6 @@ resource: "configmap" namespace: "{{system_namespace}}" -# FIXME: remove if kubernetes/features#124 is implemented -- name: Purge old flannel and canal-node - run_once: true - kube: - name: "canal-node" - kubectl: "{{ bin_dir }}/kubectl" - filename: "{{ kube_config_dir }}/canal-node.yaml" - resource: "ds" - namespace: 
"{{system_namespace}}" - state: absent - when: inventory_hostname == groups['kube-master'][0] and canal_node_manifest.changed - - name: Start flannel and calico-node run_once: true kube: diff --git a/roles/kubernetes/secrets/files/make-ssl.sh b/roles/kubernetes/secrets/files/make-ssl.sh index 0cb7e37c6..09342625d 100755 --- a/roles/kubernetes/secrets/files/make-ssl.sh +++ b/roles/kubernetes/secrets/files/make-ssl.sh @@ -82,10 +82,13 @@ gen_key_and_cert() { # Admins if [ -n "$MASTERS" ]; then - # If any host requires new certs, just regenerate all master certs # kube-apiserver - gen_key_and_cert "apiserver" "/CN=kube-apiserver" - cat ca.pem >> apiserver.pem + # Generate only if we don't have existing ca and apiserver certs + if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then + gen_key_and_cert "apiserver" "/CN=kube-apiserver" + cat ca.pem >> apiserver.pem + fi + # If any host requires new certs, just regenerate scheduler and controller-manager master certs # kube-scheduler gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler" # kube-controller-manager diff --git a/roles/network_plugin/canal/templates/canal-node.yml.j2 b/roles/network_plugin/canal/templates/canal-node.yml.j2 index ca7b37f86..cd9312832 100644 --- a/roles/network_plugin/canal/templates/canal-node.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yml.j2 @@ -3,6 +3,7 @@ kind: DaemonSet apiVersion: extensions/v1beta1 metadata: name: canal-node + namespace: {{ system_namespace }} labels: k8s-app: canal-node spec: @@ -180,3 +181,7 @@ spec: - name: "canal-certs" mountPath: "{{ canal_cert_dir }}" readOnly: true + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate From 6eb22c5db274ebf9f2f397a391e7b7e83d8093ee Mon Sep 17 00:00:00 2001 From: Maxim Krasilnikov Date: Wed, 30 Aug 2017 16:03:22 +0300 Subject: [PATCH 18/64] Change single Vault pki mount to multi pki mounts paths for etcd and kube CA`s (#1552) * Added update CA trust step for etcd 
and kube/secrets roles * Added load_balancer_domain_name to certificate alt names if defined. Reset CA's in RedHat os. * Rename kube-cluster-ca.crt to vault-ca.crt, we need separated CA`s for vault, etcd and kube. * Vault role refactoring, remove optional cert vault auth because not not used and worked. Create separate CA`s fro vault and etcd. * Fixed different certificates set for vault cert_managment * Update doc/vault.md * Fixed condition create vault CA, wrong group * Fixed missing etcd_cert_path mount for rkt deployment type. Distribute vault roles for all vault hosts * Removed wrong when condition in create etcd role vault tasks. --- docs/vault.md | 27 ++++---- roles/etcd/defaults/main.yml | 2 + roles/etcd/tasks/gen_certs_script.yml | 27 -------- roles/etcd/tasks/gen_certs_vault.yml | 2 + roles/etcd/tasks/main.yml | 3 + roles/etcd/tasks/upd_ca_trust.yml | 27 ++++++++ roles/kubernetes/secrets/defaults/main.yml | 1 + .../secrets/tasks/gen_certs_script.yml | 27 -------- .../secrets/tasks/gen_certs_vault.yml | 28 +++++++-- roles/kubernetes/secrets/tasks/main.yml | 3 + .../kubernetes/secrets/tasks/upd_ca_trust.yml | 27 ++++++++ roles/reset/tasks/main.yml | 10 ++- roles/vault/defaults/main.yml | 40 +++++++----- roles/vault/tasks/bootstrap/ca_trust.yml | 6 +- .../tasks/bootstrap/create_etcd_role.yml | 20 ++++-- roles/vault/tasks/bootstrap/gen_auth_ca.yml | 21 ------- roles/vault/tasks/bootstrap/gen_ca.yml | 31 ---------- .../vault/tasks/bootstrap/gen_vault_certs.yml | 3 +- roles/vault/tasks/bootstrap/main.yml | 61 +++++++++++-------- .../vault/tasks/bootstrap/role_auth_cert.yml | 26 -------- .../tasks/bootstrap/role_auth_userpass.yml | 11 ---- .../vault/tasks/bootstrap/sync_etcd_certs.yml | 16 +++++ roles/vault/tasks/cluster/create_roles.yml | 11 +++- roles/vault/tasks/cluster/main.yml | 41 ++++++++++--- roles/vault/tasks/cluster/role_auth_cert.yml | 19 ------ .../tasks/cluster/role_auth_userpass.yml | 10 --- roles/vault/tasks/shared/cert_auth_mount.yml | 7 +-- 
roles/vault/tasks/shared/config_ca.yml | 9 ++- roles/vault/tasks/shared/create_mount.yml | 16 +++++ roles/vault/tasks/shared/create_role.yml | 35 ++--------- roles/vault/tasks/shared/gen_ca.yml | 29 +++++++++ roles/vault/tasks/shared/issue_cert.yml | 15 +---- roles/vault/tasks/shared/mount.yml | 18 ------ roles/vault/tasks/shared/pki_mount.yml | 34 ++++++++--- roles/vault/templates/docker.service.j2 | 1 + roles/vault/templates/rkt.service.j2 | 2 + 36 files changed, 337 insertions(+), 329 deletions(-) create mode 100644 roles/etcd/tasks/upd_ca_trust.yml create mode 100644 roles/kubernetes/secrets/tasks/upd_ca_trust.yml delete mode 100644 roles/vault/tasks/bootstrap/gen_auth_ca.yml delete mode 100644 roles/vault/tasks/bootstrap/gen_ca.yml delete mode 100644 roles/vault/tasks/bootstrap/role_auth_cert.yml delete mode 100644 roles/vault/tasks/bootstrap/role_auth_userpass.yml create mode 100644 roles/vault/tasks/bootstrap/sync_etcd_certs.yml delete mode 100644 roles/vault/tasks/cluster/role_auth_cert.yml delete mode 100644 roles/vault/tasks/cluster/role_auth_userpass.yml create mode 100644 roles/vault/tasks/shared/create_mount.yml create mode 100644 roles/vault/tasks/shared/gen_ca.yml delete mode 100644 roles/vault/tasks/shared/mount.yml diff --git a/docs/vault.md b/docs/vault.md index 3850d04b5..056d76356 100644 --- a/docs/vault.md +++ b/docs/vault.md @@ -26,7 +26,6 @@ first task, is to stop any temporary instances of Vault, to free the port for the long-term. At the end of this task, the entire Vault cluster should be up and read to go. - Keys to the Kingdom ------------------- @@ -44,30 +43,38 @@ to authenticate to almost everything in Kubernetes and decode all private (HTTPS) traffic on your network signed by Vault certificates. For even greater security, you may want to remove and store elsewhere any -CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem). +CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem). 
Vault by default encrypts all traffic to and from the datastore backend, all resting data, and uses TLS for its TCP listener. It is recommended that you do not change the Vault config to disable TLS, unless you absolutely have to. - Usage ----- To get the Vault role running, you must to do two things at a minimum: 1. Assign the ``vault`` group to at least 1 node in your inventory -2. Change ``cert_management`` to be ``vault`` instead of ``script`` +1. Change ``cert_management`` to be ``vault`` instead of ``script`` Nothing else is required, but customization is possible. Check ``roles/vault/defaults/main.yml`` for the different variables that can be overridden, most common being ``vault_config``, ``vault_port``, and ``vault_deployment_type``. -Also, if you intend to use a Root or Intermediate CA generated elsewhere, -you'll need to copy the certificate and key to the hosts in the vault group -prior to running the vault role. By default, they'll be located at -``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively. +As a result of the Vault role will be create separated Root CA for `etcd`, +`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA +generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at: + +* vault: + * ``/etc/vault/ssl/ca.pem`` + * ``/etc/vault/ssl/ca-key.pem`` +* etcd: + * ``/etc/ssl/etcd/ssl/ca.pem`` + * ``/etc/ssl/etcd/ssl/ca-key.pem`` +* kubernetes: + * ``/etc/kubernetes/ssl/ca.pem`` + * ``/etc/kubernetes/ssl/ca-key.pem`` Additional Notes: @@ -77,7 +84,6 @@ Additional Notes: credentials are saved to ``/etc/vault/roles//``. The service will need to read in those credentials, if they want to interact with Vault. 
- Potential Work -------------- @@ -87,6 +93,3 @@ Potential Work - Add the ability to start temp Vault with Host, Rkt, or Docker - Add a dynamic way to change out the backend role creation during Bootstrap, so other services can be used (such as Consul) -- Segregate Server Cert generation from Auth Cert generation (separate CAs). - This work was partially started with the `auth_cert_backend` tasks, but would - need to be further applied to all roles (particularly Etcd and Kubernetes). diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 6b6fde38d..eb0cab951 100644 --- a/roles/etcd/defaults/main.yml +++ b/roles/etcd/defaults/main.yml @@ -26,3 +26,5 @@ etcd_memory_limit: 512M etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}" etcd_compaction_retention: "8" + +etcd_vault_mount_path: etcd diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 000f6842b..46d0ddb9a 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -161,30 +161,3 @@ owner: kube mode: "u=rwX,g-rwx,o-rwx" recurse: yes - -- name: Gen_certs | target ca-certificate store file - set_fact: - ca_cert_path: |- - {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/etcd-ca.crt - {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/etcd-ca.crt - {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/etcd-ca.pem - {%- endif %} - tags: facts - -- name: Gen_certs | add CA to trusted CA dir - copy: - src: "{{ etcd_cert_dir }}/ca.pem" - dest: "{{ ca_cert_path }}" - remote_src: true - register: etcd_ca_cert - -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) - command: update-ca-certificates - when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - -- name: Gen_certs | update ca-certificates (RedHat) - 
command: update-ca-trust extract - when: etcd_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml index e59d376e9..4f27eff86 100644 --- a/roles/etcd/tasks/gen_certs_vault.yml +++ b/roles/etcd/tasks/gen_certs_vault.yml @@ -66,6 +66,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: etcd issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ etcd_vault_mount_path }}" with_items: "{{ etcd_master_certs_needed|d([]) }}" when: inventory_hostname in groups.etcd notify: set etcd_secret_changed @@ -92,6 +93,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: etcd issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ etcd_vault_mount_path }}" with_items: "{{ etcd_node_certs_needed|d([]) }}" when: inventory_hostname in etcd_node_cert_hosts notify: set etcd_secret_changed diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index afd5fa883..6d8388ee8 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -10,6 +10,9 @@ - include: "gen_certs_{{ cert_management }}.yml" tags: etcd-secrets +- include: upd_ca_trust.yml + tags: etcd-secrets + - include: "install_{{ etcd_deployment_type }}.yml" when: is_etcd_master tags: upgrade diff --git a/roles/etcd/tasks/upd_ca_trust.yml b/roles/etcd/tasks/upd_ca_trust.yml new file mode 100644 index 000000000..81ce1e573 --- /dev/null +++ b/roles/etcd/tasks/upd_ca_trust.yml @@ -0,0 +1,27 @@ +--- +- name: Gen_certs | target ca-certificate store file + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/etcd-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/etcd-ca.crt + {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} + /etc/ssl/certs/etcd-ca.pem + {%- endif %} + tags: facts + +- name: Gen_certs | add CA to trusted CA dir 
+ copy: + src: "{{ etcd_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + register: etcd_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) + command: update-ca-certificates + when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + +- name: Gen_certs | update ca-certificates (RedHat) + command: update-ca-trust extract + when: etcd_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/kubernetes/secrets/defaults/main.yml b/roles/kubernetes/secrets/defaults/main.yml index e6177857e..f0d10711d 100644 --- a/roles/kubernetes/secrets/defaults/main.yml +++ b/roles/kubernetes/secrets/defaults/main.yml @@ -1,2 +1,3 @@ --- kube_cert_group: kube-cert +kube_vault_mount_path: kube diff --git a/roles/kubernetes/secrets/tasks/gen_certs_script.yml b/roles/kubernetes/secrets/tasks/gen_certs_script.yml index 41d91362b..192787b97 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_script.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_script.yml @@ -166,30 +166,3 @@ owner: kube mode: "u=rwX,g-rwx,o-rwx" recurse: yes - -- name: Gen_certs | target ca-certificates path - set_fact: - ca_cert_path: |- - {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/kube-ca.crt - {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/kube-ca.crt - {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/kube-ca.pem - {%- endif %} - tags: facts - -- name: Gen_certs | add CA to trusted CA dir - copy: - src: "{{ kube_cert_dir }}/ca.pem" - dest: "{{ ca_cert_path }}" - remote_src: true - register: kube_ca_cert - -- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) - command: update-ca-certificates - when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] - -- name: Gen_certs | update ca-certificates (RedHat) - command: 
update-ca-trust extract - when: kube_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index 308ac9260..31abdbf5b 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -49,17 +49,29 @@ issue_cert_path: "{{ item }}" issue_cert_role: kube issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_master_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] +- name: gen_certs_vault | Set fact about certificate alt names + set_fact: + kube_cert_alt_names: >- + {{ + groups['kube-master'] + + ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] + + ['localhost'] + }} + run_once: true + +- name: gen_certs_vault | Add external load balancer domain name to certificate alt names + set_fact: + kube_cert_alt_names: "{{ kube_cert_alt_names + [apiserver_loadbalancer_domain_name] }}" + when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined + run_once: true + - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_alt_names: >- - {{ - groups['kube-master'] + - ['kubernetes.default.svc.cluster.local', 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] + - ['localhost'] - }} + issue_cert_alt_names: "{{ kube_cert_alt_names }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube issue_cert_headers: "{{ kube_vault_headers }}" @@ -77,8 +89,10 @@ issue_cert_path: "{{ item }}" issue_cert_role: kube issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_master_components_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] + notify: set secret_changed # 
Issue node certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml @@ -91,6 +105,7 @@ issue_cert_path: "{{ item }}" issue_cert_role: kube issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_node_certs_needed|d([]) }}" when: inventory_hostname in groups['k8s-cluster'] @@ -104,5 +119,6 @@ issue_cert_path: "{{ item }}" issue_cert_role: kube issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" + issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_proxy_certs_needed|d([]) }}" when: inventory_hostname in groups['k8s-cluster'] diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index 5f55b775b..d2ce2283d 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -72,5 +72,8 @@ - include: "gen_certs_{{ cert_management }}.yml" tags: k8s-secrets +- include: upd_ca_trust.yml + tags: k8s-secrets + - include: gen_tokens.yml tags: k8s-secrets diff --git a/roles/kubernetes/secrets/tasks/upd_ca_trust.yml b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml new file mode 100644 index 000000000..c980bb6aa --- /dev/null +++ b/roles/kubernetes/secrets/tasks/upd_ca_trust.yml @@ -0,0 +1,27 @@ +--- +- name: Gen_certs | target ca-certificates path + set_fact: + ca_cert_path: |- + {% if ansible_os_family == "Debian" -%} + /usr/local/share/ca-certificates/kube-ca.crt + {%- elif ansible_os_family == "RedHat" -%} + /etc/pki/ca-trust/source/anchors/kube-ca.crt + {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} + /etc/ssl/certs/kube-ca.pem + {%- endif %} + tags: facts + +- name: Gen_certs | add CA to trusted CA dir + copy: + src: "{{ kube_cert_dir }}/ca.pem" + dest: "{{ ca_cert_path }}" + remote_src: true + register: kube_ca_cert + +- name: Gen_certs | update ca-certificates (Debian/Ubuntu/Container Linux by CoreOS) + command: 
update-ca-certificates + when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS"] + +- name: Gen_certs | update ca-certificates (RedHat) + command: update-ca-trust extract + when: kube_ca_cert.changed and ansible_os_family == "RedHat" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index e13065404..064201571 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -17,6 +17,7 @@ with_items: - kubelet - etcd + - vault register: services_removed tags: ['services'] @@ -86,10 +87,15 @@ - /run/flannel - /etc/flannel - /run/kubernetes - - /usr/local/share/ca-certificates/kube-ca.crt - /usr/local/share/ca-certificates/etcd-ca.crt - - /etc/ssl/certs/kube-ca.pem + - /usr/local/share/ca-certificates/kube-ca.crt + - /usr/local/share/ca-certificates/vault-ca.crt - /etc/ssl/certs/etcd-ca.pem + - /etc/ssl/certs/kube-ca.pem + - /etc/ssl/certs/vault-ca.crt + - /etc/pki/ca-trust/source/anchors/etcd-ca.crt + - /etc/pki/ca-trust/source/anchors/kube-ca.crt + - /etc/pki/ca-trust/source/anchors/vault-ca.crt - /etc/vault - /var/log/pods/ - "{{ bin_dir }}/kubelet" diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 47bb39d44..eb2ffd122 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -8,10 +8,11 @@ vault_adduser_vars: system: yes vault_base_dir: /etc/vault # https://releases.hashicorp.com/vault/0.6.4/vault_0.6.4_SHA256SUMS +vault_version: 0.6.4 vault_binary_checksum: 04d87dd553aed59f3fe316222217a8d8777f40115a115dac4d88fac1611c51a6 vault_bootstrap: false vault_ca_options: - common_name: kube-cluster-ca + common_name: vault format: pem ttl: 87600h vault_cert_dir: "{{ vault_base_dir }}/ssl" @@ -24,7 +25,7 @@ vault_config: address: "{{ vault_etcd_url }}" ha_enabled: "true" redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}" - tls_ca_file: "{{ vault_cert_dir }}/ca.pem" + tls_ca_file: "{{ vault_etcd_cert_dir }}/ca.pem" 
cluster_name: "kubernetes-vault" default_lease_ttl: "{{ vault_default_lease_ttl }}" listener: @@ -61,18 +62,6 @@ vault_log_dir: "/var/log/vault" vault_max_lease_ttl: 87600h vault_needs_gen: false vault_port: 8200 -# Although "cert" is an option, ansible has no way to auth via cert until -# upstream merges: https://github.com/ansible/ansible/pull/18141 -vault_role_auth_method: userpass -vault_roles: - - name: etcd - group: etcd - policy_rules: default - role_options: default - - name: kube - group: k8s-cluster - policy_rules: default - role_options: default vault_roles_dir: "{{ vault_base_dir }}/roles" vault_secret_shares: 1 vault_secret_threshold: 1 @@ -88,4 +77,25 @@ vault_temp_config: tls_disable: "true" max_lease_ttl: "{{ vault_max_lease_ttl }}" vault_temp_container_name: vault-temp -vault_version: 0.6.4 +# etcd pki mount options +vault_etcd_cert_dir: /etc/ssl/etcd/ssl +vault_etcd_mount_path: etcd +vault_etcd_default_lease_ttl: 720h +vault_etcd_max_lease_ttl: 87600h +vault_etcd_role: + name: etcd + group: etcd + policy_rules: default + role_options: default + mount_path: "{{ vault_etcd_mount_path }}" +# kubernetes pki mount options +vault_kube_cert_dir: "{{ kube_cert_dir }}" +vault_kube_mount_path: kube +vault_kube_default_lease_ttl: 720h +vault_kube_max_lease_ttl: 87600h +vault_kube_role: + name: kube + group: k8s-cluster + policy_rules: default + role_options: default + mount_path: "{{ vault_kube_mount_path }}" diff --git a/roles/vault/tasks/bootstrap/ca_trust.yml b/roles/vault/tasks/bootstrap/ca_trust.yml index ae67f7405..010e6bbc6 100644 --- a/roles/vault/tasks/bootstrap/ca_trust.yml +++ b/roles/vault/tasks/bootstrap/ca_trust.yml @@ -10,11 +10,11 @@ set_fact: ca_cert_path: >- {% if ansible_os_family == "Debian" -%} - /usr/local/share/ca-certificates/kube-cluster-ca.crt + /usr/local/share/ca-certificates/vault-ca.crt {%- elif ansible_os_family == "RedHat" -%} - /etc/pki/ca-trust/source/anchors/kube-cluster-ca.crt + 
/etc/pki/ca-trust/source/anchors/vault-ca.crt {%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%} - /etc/ssl/certs/kube-cluster-ca.pem + /etc/ssl/certs/vault-ca.pem {%- endif %} - name: bootstrap/ca_trust | add CA to trusted CA dir diff --git a/roles/vault/tasks/bootstrap/create_etcd_role.yml b/roles/vault/tasks/bootstrap/create_etcd_role.yml index 5e0b88a39..74cd5fc2f 100644 --- a/roles/vault/tasks/bootstrap/create_etcd_role.yml +++ b/roles/vault/tasks/bootstrap/create_etcd_role.yml @@ -1,9 +1,17 @@ --- +- include: ../shared/auth_backend.yml + vars: + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass + delegate_to: "{{ groups.vault|first }}" + run_once: true + - include: ../shared/create_role.yml vars: - create_role_name: "{{ item.name }}" - create_role_group: "{{ item.group }}" - create_role_policy_rules: "{{ item.policy_rules }}" - create_role_options: "{{ item.role_options }}" - with_items: "{{ vault_roles }}" - when: item.name == "etcd" + create_role_name: "{{ vault_etcd_role.name }}" + create_role_group: "{{ vault_etcd_role.group }}" + create_role_policy_rules: "{{ vault_etcd_role.policy_rules }}" + create_role_options: "{{ vault_etcd_role.role_options }}" + create_role_mount_path: "{{ vault_etcd_role.mount_path }}" + when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/gen_auth_ca.yml b/roles/vault/tasks/bootstrap/gen_auth_ca.yml deleted file mode 100644 index 10313ecea..000000000 --- a/roles/vault/tasks/bootstrap/gen_auth_ca.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- - -- name: bootstrap/gen_auth_ca | Generate Root CA - uri: - url: "{{ vault_leader_url }}/v1/auth-pki/root/generate/exported" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_ca_options }}" - register: vault_auth_ca_gen - when: inventory_hostname == groups.vault|first - -- name: 
bootstrap/gen_auth_ca | Copy auth CA cert to Vault nodes - copy: - content: "{{ hostvars[groups.vault|first]['vault_auth_ca_gen']['json']['data']['certificate'] }}" - dest: "{{ vault_cert_dir }}/auth-ca.pem" - -- name: bootstrap/gen_auth_ca | Copy auth CA key to Vault nodes - copy: - content: "{{ hostvars[groups.vault|first]['vault_auth_ca_gen']['json']['data']['private_key'] }}" - dest: "{{ vault_cert_dir }}/auth-ca-key.pem" diff --git a/roles/vault/tasks/bootstrap/gen_ca.yml b/roles/vault/tasks/bootstrap/gen_ca.yml deleted file mode 100644 index ab1cb6345..000000000 --- a/roles/vault/tasks/bootstrap/gen_ca.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- - -- name: bootstrap/gen_ca | Ensure vault_cert_dir exists - file: - mode: 0755 - path: "{{ vault_cert_dir }}" - state: directory - -- name: bootstrap/gen_ca | Generate Root CA in vault-temp - uri: - url: "{{ vault_leader_url }}/v1/pki/root/generate/exported" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_ca_options }}" - register: vault_ca_gen - when: inventory_hostname == groups.vault|first and vault_ca_cert_needed - -- name: bootstrap/gen_ca | Copy root CA cert locally - copy: - content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['certificate'] }}" - dest: "{{ vault_cert_dir }}/ca.pem" - mode: 0644 - when: vault_ca_cert_needed - -- name: bootstrap/gen_ca | Copy root CA key locally - copy: - content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" - dest: "{{ vault_cert_dir }}/ca-key.pem" - mode: 0640 - when: vault_ca_cert_needed diff --git a/roles/vault/tasks/bootstrap/gen_vault_certs.yml b/roles/vault/tasks/bootstrap/gen_vault_certs.yml index 4a7f4ed31..651c2ac49 100644 --- a/roles/vault/tasks/bootstrap/gen_vault_certs.yml +++ b/roles/vault/tasks/bootstrap/gen_vault_certs.yml @@ -2,7 +2,7 @@ - name: boostrap/gen_vault_certs | Add the vault role uri: - url: "{{ vault_leader_url }}/v1/pki/roles/vault" + url: "{{ 
vault_leader_url }}/v1/{{ vault_ca_options.common_name }}/roles/vault" headers: "{{ vault_headers }}" method: POST body_format: json @@ -21,6 +21,7 @@ {%- endfor -%} "127.0.0.1","::1" ] + issue_cert_mount_path: "{{ vault_ca_options.common_name }}" issue_cert_path: "{{ vault_cert_dir }}/api.pem" issue_cert_headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" issue_cert_role: vault diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index 83167ace7..768d9e03b 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -14,6 +14,9 @@ - include: sync_vault_certs.yml when: inventory_hostname in groups.vault +- include: sync_etcd_certs.yml + when: inventory_hostname in groups.etcd + ## Generate Certs # Start a temporary instance of Vault @@ -28,24 +31,22 @@ vault_leader_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" when: not vault_cluster_is_initialized -# NOTE: The next 2 steps run against temp Vault and long-term Vault - -# Ensure PKI mount exists -- include: ../shared/pki_mount.yml - when: >- - inventory_hostname == groups.vault|first - -# If the Root CA already exists, ensure Vault's PKI is using it -- include: ../shared/config_ca.yml +# Ensure vault PKI mounts exists +- include: ../shared/create_mount.yml vars: - ca_name: ca - mount_name: pki - when: >- - inventory_hostname == groups.vault|first and - not vault_ca_cert_needed + create_mount_path: "{{ vault_ca_options.common_name }}" + create_mount_default_lease_ttl: "{{ vault_default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ vault_max_lease_ttl }}" + create_mount_description: "Vault Root CA" + create_mount_cert_dir: "{{ vault_cert_dir }}" + create_mount_config_ca_needed: "{{ not vault_ca_cert_needed }}" + when: inventory_hostname == groups.vault|first # Generate root CA certs for Vault if none exist -- include: gen_ca.yml +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_cert_dir }}" + 
gen_ca_mount_path: "{{ vault_ca_options.common_name }}" when: >- inventory_hostname in groups.vault and not vault_cluster_is_initialized and @@ -55,13 +56,25 @@ - include: gen_vault_certs.yml when: inventory_hostname in groups.vault and vault_api_cert_needed -# Update all host's CA bundle +# Ensure etcd PKI mounts exists +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ vault_etcd_mount_path }}" + create_mount_default_lease_ttl: "{{ vault_etcd_default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ vault_etcd_max_lease_ttl }}" + create_mount_description: "Etcd Root CA" + create_mount_cert_dir: "{{ vault_etcd_cert_dir }}" + create_mount_config_ca_needed: "{{ not vault_etcd_ca_cert_needed }}" + when: inventory_hostname == groups.vault|first + +# Generate root CA certs for etcd if none exist +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_etcd_cert_dir }}" + gen_ca_mount_path: "{{ vault_etcd_mount_path }}" + when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed + +- include: create_etcd_role.yml + +# Update all host's CA bundle, etcd CA will be added in etcd role - include: ca_trust.yml - -## Add Etcd Role to Vault (if needed) - -- include: role_auth_cert.yml - when: vault_role_auth_method == "cert" - -- include: role_auth_userpass.yml - when: vault_role_auth_method == "userpass" diff --git a/roles/vault/tasks/bootstrap/role_auth_cert.yml b/roles/vault/tasks/bootstrap/role_auth_cert.yml deleted file mode 100644 index d92cd9d69..000000000 --- a/roles/vault/tasks/bootstrap/role_auth_cert.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- - -- include: ../shared/sync_auth_certs.yml - when: inventory_hostname in groups.vault - -- include: ../shared/cert_auth_mount.yml - when: inventory_hostname == groups.vault|first - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Cert-based Auth primarily for services needing to issue certificates - auth_backend_name: cert - auth_backend_type: cert - when: 
inventory_hostname == groups.vault|first - -- include: gen_auth_ca.yml - when: inventory_hostname in groups.vault and vault_auth_ca_cert_needed - -- include: ../shared/config_ca.yml - vars: - ca_name: auth-ca - mount_name: auth-pki - when: inventory_hostname == groups.vault|first and not vault_auth_ca_cert_needed - -- include: create_etcd_role.yml - when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/role_auth_userpass.yml b/roles/vault/tasks/bootstrap/role_auth_userpass.yml deleted file mode 100644 index 2ad2fbc91..000000000 --- a/roles/vault/tasks/bootstrap/role_auth_userpass.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - when: inventory_hostname == groups.vault|first - -- include: create_etcd_role.yml - when: inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/sync_etcd_certs.yml b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml new file mode 100644 index 000000000..599b3cd47 --- /dev/null +++ b/roles/vault/tasks/bootstrap/sync_etcd_certs.yml @@ -0,0 +1,16 @@ +--- + +- include: ../shared/sync_file.yml + vars: + sync_file: "ca.pem" + sync_file_dir: "{{ vault_etcd_cert_dir }}" + sync_file_hosts: "{{ groups.etcd }}" + sync_file_is_cert: true + +- name: bootstrap/sync_etcd_certs | Set facts for etcd sync_file results + set_fact: + vault_etcd_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}" + +- name: bootstrap/sync_etcd_certs | Unset sync_file_results after ca.pem sync + set_fact: + sync_file_results: [] diff --git a/roles/vault/tasks/cluster/create_roles.yml b/roles/vault/tasks/cluster/create_roles.yml index a135137da..54aae815d 100644 --- a/roles/vault/tasks/cluster/create_roles.yml +++ b/roles/vault/tasks/cluster/create_roles.yml @@ -1,4 +1,10 @@ --- +- include: 
../shared/auth_backend.yml + vars: + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass + when: inventory_hostname == groups.vault|first - include: ../shared/create_role.yml vars: @@ -6,4 +12,7 @@ create_role_group: "{{ item.group }}" create_role_policy_rules: "{{ item.policy_rules }}" create_role_options: "{{ item.role_options }}" - with_items: "{{ vault_roles|d([]) }}" + create_role_mount_path: "{{ item.mount_path }}" + with_items: + - "{{ vault_etcd_role }}" + - "{{ vault_kube_role }}" diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index c21fd0d73..7a5acba0a 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -25,19 +25,42 @@ - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault -- include: ../shared/pki_mount.yml +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ vault_ca_options.common_name }}" + create_mount_default_lease_ttl: "{{ vault_default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ vault_max_lease_ttl }}" + create_mount_description: "Vault Root CA" + create_mount_cert_dir: "{{ vault_cert_dir }}" + create_mount_config_ca_needed: true when: inventory_hostname == groups.vault|first -- include: ../shared/config_ca.yml +- include: ../shared/create_mount.yml vars: - ca_name: ca - mount_name: pki + create_mount_path: "{{ vault_etcd_mount_path }}" + create_mount_default_lease_ttl: "{{ vault_etcd_default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ vault_etcd_max_lease_ttl }}" + create_mount_description: "Etcd Root CA" + create_mount_cert_dir: "{{ vault_etcd_cert_dir }}" + create_mount_config_ca_needed: true when: inventory_hostname == groups.vault|first +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ vault_kube_mount_path }}" + create_mount_default_lease_ttl: "{{ 
vault_kube_default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ vault_kube_max_lease_ttl }}" + create_mount_description: "Kubernetes Root CA" + create_mount_cert_dir: "{{ vault_kube_cert_dir }}" + create_mount_config_ca_needed: false + when: inventory_hostname == groups.vault|first + +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_kube_cert_dir }}" + gen_ca_mount_path: "{{ vault_kube_mount_path }}" + when: inventory_hostname in groups.vault + ## Vault Policies, Roles, and Auth Backends -- include: role_auth_cert.yml - when: vault_role_auth_method == "cert" - -- include: role_auth_userpass.yml - when: vault_role_auth_method == "userpass" +- include: create_roles.yml diff --git a/roles/vault/tasks/cluster/role_auth_cert.yml b/roles/vault/tasks/cluster/role_auth_cert.yml deleted file mode 100644 index 9f186e3ff..000000000 --- a/roles/vault/tasks/cluster/role_auth_cert.yml +++ /dev/null @@ -1,19 +0,0 @@ ---- - -- include: ../shared/cert_auth_mount.yml - when: inventory_hostname == groups.vault|first - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Cert-based Auth primarily for services needing to issue certificates - auth_backend_name: cert - auth_backend_type: cert - when: inventory_hostname == groups.vault|first - -- include: ../shared/config_ca.yml - vars: - ca_name: auth-ca - mount_name: auth-pki - when: inventory_hostname == groups.vault|first - -- include: create_roles.yml diff --git a/roles/vault/tasks/cluster/role_auth_userpass.yml b/roles/vault/tasks/cluster/role_auth_userpass.yml deleted file mode 100644 index ac3b2c6c1..000000000 --- a/roles/vault/tasks/cluster/role_auth_userpass.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - when: inventory_hostname == groups.vault|first - -- include: 
create_roles.yml diff --git a/roles/vault/tasks/shared/cert_auth_mount.yml b/roles/vault/tasks/shared/cert_auth_mount.yml index 9710aa7ca..6ba303d3b 100644 --- a/roles/vault/tasks/shared/cert_auth_mount.yml +++ b/roles/vault/tasks/shared/cert_auth_mount.yml @@ -1,14 +1,13 @@ --- -- include: ../shared/mount.yml +- include: ../shared/pki_mount.yml vars: - mount_name: auth-pki - mount_options: + pki_mount_path: auth-pki + pki_mount_options: description: PKI mount to generate certs for the Cert Auth Backend config: default_lease_ttl: "{{ vault_default_lease_ttl }}" max_lease_ttl: "{{ vault_max_lease_ttl }}" - type: pki - name: shared/auth_mount | Create a dummy role for issuing certs from auth-pki uri: diff --git a/roles/vault/tasks/shared/config_ca.yml b/roles/vault/tasks/shared/config_ca.yml index 79c972b4d..0ef34e7b8 100644 --- a/roles/vault/tasks/shared/config_ca.yml +++ b/roles/vault/tasks/shared/config_ca.yml @@ -1,12 +1,11 @@ --- - - name: config_ca | Read root CA cert for Vault - command: "cat /etc/vault/ssl/{{ ca_name }}.pem" + command: "cat {{ config_ca_ca_pem }}" register: vault_ca_cert_cat - name: config_ca | Pull current CA cert from Vault uri: - url: "{{ vault_leader_url }}/v1/{{ mount_name }}/ca/pem" + url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/ca/pem" headers: "{{ vault_headers }}" return_content: true status_code: 200,204 @@ -14,13 +13,13 @@ register: vault_pull_current_ca - name: config_ca | Read root CA key for Vault - command: "cat /etc/vault/ssl/{{ ca_name }}-key.pem" + command: "cat {{ config_ca_ca_key }}" register: vault_ca_key_cat when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.content.strip() - name: config_ca | Configure pki mount to use the found root CA cert and key uri: - url: "{{ vault_leader_url }}/v1/{{ mount_name }}/config/ca" + url: "{{ vault_leader_url }}/v1/{{ config_ca_mount_path }}/config/ca" headers: "{{ vault_headers }}" method: POST body_format: json diff --git 
a/roles/vault/tasks/shared/create_mount.yml b/roles/vault/tasks/shared/create_mount.yml new file mode 100644 index 000000000..0b12dce24 --- /dev/null +++ b/roles/vault/tasks/shared/create_mount.yml @@ -0,0 +1,16 @@ +--- +- include: ../shared/pki_mount.yml + vars: + pki_mount_path: "{{ create_mount_path }}" + pki_mount_options: + config: + default_lease_ttl: "{{ create_mount_default_lease_ttl }}" + max_lease_ttl: "{{ create_mount_max_lease_ttl }}" + description: "{{ create_mount_description }}" + +- include: ../shared/config_ca.yml + vars: + config_ca_ca_pem: "{{ create_mount_cert_dir }}/ca.pem" + config_ca_ca_key: "{{ create_mount_cert_dir }}/ca-key.pem" + config_ca_mount_path: "{{ create_mount_path }}" + when: create_mount_config_ca_needed diff --git a/roles/vault/tasks/shared/create_role.yml b/roles/vault/tasks/shared/create_role.yml index c39fafe8c..fae45207a 100644 --- a/roles/vault/tasks/shared/create_role.yml +++ b/roles/vault/tasks/shared/create_role.yml @@ -12,8 +12,8 @@ {%- if create_role_policy_rules|d("default") == "default" -%} {{ { 'path': { - 'pki/issue/' + create_role_name: {'policy': 'write'}, - 'pki/roles/' + create_role_name: {'policy': 'read'} + create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'}, + create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'} }} | to_json + '\n' }} {%- else -%} @@ -22,9 +22,9 @@ status_code: 204 when: inventory_hostname == groups[create_role_group]|first -- name: create_role | Create the new role in the pki mount +- name: create_role | Create the new role in the {{ create_role_mount_path }} pki mount uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/pki/roles/{{ create_role_name }}" + url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/{{ create_role_mount_path }}/roles/{{ create_role_name }}" headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" method: POST body_format: json @@ -37,31 +37,6 @@ status_code: 204 when: inventory_hostname 
== groups[create_role_group]|first -## Cert based auth method - -- include: gen_cert.yml - vars: - gen_cert_copy_ca: true - gen_cert_hosts: "{{ groups[create_role_group] }}" - gen_cert_mount: "auth-pki" - gen_cert_path: "{{ vault_roles_dir }}/{{ create_role_name }}/issuer.pem" - gen_cert_vault_headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - gen_cert_vault_role: "dummy" - gen_cert_vault_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" - when: vault_role_auth_method == "cert" and inventory_hostname in groups[create_role_group] - -- name: create_role | Insert the auth-pki CA as the authenticating CA for that role - uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/cert/certs/{{ create_role_name }}" - headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - method: POST - body_format: json - body: - certificate: "{{ hostvars[groups[create_role_group]|first]['gen_cert_result']['json']['data']['issuing_ca'] }}" - policies: "{{ create_role_name }}" - status_code: 204 - when: vault_role_auth_method == "cert" and inventory_hostname == groups[create_role_group]|first - ## Userpass based auth method - include: gen_userpass.yml @@ -71,4 +46,4 @@ gen_userpass_policies: "{{ create_role_name }}" gen_userpass_role: "{{ create_role_name }}" gen_userpass_username: "{{ create_role_name }}" - when: vault_role_auth_method == "userpass" and inventory_hostname in groups[create_role_group] + when: inventory_hostname in groups[create_role_group] diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml new file mode 100644 index 000000000..b80ebeb6b --- /dev/null +++ b/roles/vault/tasks/shared/gen_ca.yml @@ -0,0 +1,29 @@ +--- +- name: "bootstrap/gen_ca | Ensure cert_dir {{ gen_ca_cert_dir }} exists" + file: + mode: 0755 + path: "{{ gen_ca_cert_dir }}" + state: directory + +- name: "bootstrap/gen_ca | Generate {{ gen_ca_mount_path }} root CA" + uri: + url: "{{ vault_leader_url }}/v1/{{ 
gen_ca_mount_path }}/root/generate/exported" + headers: "{{ vault_headers }}" + method: POST + body_format: json + body: "{{ vault_ca_options }}" + register: vault_ca_gen + delegate_to: "{{ groups.vault|first }}" + run_once: true + +- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA cert locally" + copy: + content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['certificate'] }}" + dest: "{{ gen_ca_cert_dir }}/ca.pem" + mode: 0644 + +- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key locally" + copy: + content: "{{ hostvars[groups.vault|first]['vault_ca_gen']['json']['data']['private_key'] }}" + dest: "{{ gen_ca_cert_dir }}/ca-key.pem" + mode: 0640 diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 3b6b6d315..d3dbbd9e8 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -14,16 +14,11 @@ # issue_cert_headers: Headers passed into the issue request # issue_cert_hosts: List of hosts to distribute the cert to # issue_cert_ip_sans: Requested IP Subject Alternative Names, in a list -# issue_cert_mount: Mount point in Vault to make the request to +# issue_cert_mount_path: Mount point in Vault to make the request to # issue_cert_path: Full path to the cert, include its name # issue_cert_role: The Vault role to issue the cert with # issue_cert_url: Url to reach Vault, including protocol and port -- name: issue_cert | debug who issues certs - debug: - msg: "{{ issue_cert_hosts }} issues certs" - - - name: issue_cert | Ensure target directory exists file: path: "{{ issue_cert_path | dirname }}" @@ -34,7 +29,7 @@ - name: "issue_cert | Generate the cert for {{ issue_cert_role }}" uri: - url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount|d('pki') }}/issue/{{ issue_cert_role }}" + url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount_path|d('pki') }}/issue/{{ issue_cert_role }}" headers: "{{ issue_cert_headers }}" method: POST 
body_format: json @@ -45,11 +40,7 @@ ip_sans: "{{ issue_cert_ip_sans | default([]) | join(',') }}" register: issue_cert_result delegate_to: "{{ issue_cert_hosts|first }}" - -- name: issue_cert | results - debug: - msg: "{{ issue_cert_result }}" - + run_once: true - name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts" copy: diff --git a/roles/vault/tasks/shared/mount.yml b/roles/vault/tasks/shared/mount.yml deleted file mode 100644 index b98b45c57..000000000 --- a/roles/vault/tasks/shared/mount.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- - -- name: shared/mount | Test if PKI mount exists - uri: - url: "{{ vault_leader_url }}/v1/sys/mounts/{{ mount_name }}/tune" - headers: "{{ vault_headers }}" - ignore_errors: true - register: vault_pki_mount_check - -- name: shared/mount | Mount PKI mount if needed - uri: - url: "{{ vault_leader_url }}/v1/sys/mounts/{{ mount_name }}" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ mount_options|d() }}" - status_code: 204 - when: vault_pki_mount_check|failed diff --git a/roles/vault/tasks/shared/pki_mount.yml b/roles/vault/tasks/shared/pki_mount.yml index 31faef434..3df56e0f8 100644 --- a/roles/vault/tasks/shared/pki_mount.yml +++ b/roles/vault/tasks/shared/pki_mount.yml @@ -1,11 +1,27 @@ --- +- name: "shared/mount | Test if {{ pki_mount_path }} PKI mount exists" + uri: + url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}/tune" + headers: "{{ vault_headers }}" + ignore_errors: true + register: vault_pki_mount_check -- include: mount.yml - vars: - mount_name: pki - mount_options: - config: - default_lease_ttl: "{{ vault_default_lease_ttl }}" - max_lease_ttl: "{{ vault_max_lease_ttl }}" - description: The default PKI mount for Kubernetes - type: pki +- name: shared/mount | Set pki mount type + set_fact: + mount_options: "{{ pki_mount_options | combine({'type': 'pki'}) }}" + when: vault_pki_mount_check|failed + +- name: shared/mount | Mount {{ pki_mount_path }} PKI mount if needed + 
uri: + url: "{{ vault_leader_url }}/v1/sys/mounts/{{ pki_mount_path }}" + headers: "{{ vault_headers }}" + method: POST + body_format: json + body: "{{ mount_options|d() }}" + status_code: 204 + when: vault_pki_mount_check|failed + +- name: shared/mount | Unset mount options + set_fact: + mount_options: {} + when: vault_pki_mount_check|failed diff --git a/roles/vault/templates/docker.service.j2 b/roles/vault/templates/docker.service.j2 index c355b7f01..f99035c77 100644 --- a/roles/vault/templates/docker.service.j2 +++ b/roles/vault/templates/docker.service.j2 @@ -21,6 +21,7 @@ ExecStart={{ docker_bin_dir }}/docker run \ --cap-add=IPC_LOCK \ -v {{ vault_cert_dir }}:{{ vault_cert_dir }} \ -v {{ vault_config_dir }}:{{ vault_config_dir }} \ +-v {{ vault_etcd_cert_dir }}:{{ vault_etcd_cert_dir }} \ -v {{ vault_log_dir }}:/vault/logs \ -v {{ vault_roles_dir }}:{{ vault_roles_dir }} \ -v {{ vault_secrets_dir }}:{{ vault_secrets_dir }} \ diff --git a/roles/vault/templates/rkt.service.j2 b/roles/vault/templates/rkt.service.j2 index 42b9458ac..b0e91dc0f 100644 --- a/roles/vault/templates/rkt.service.j2 +++ b/roles/vault/templates/rkt.service.j2 @@ -24,6 +24,8 @@ ExecStart=/usr/bin/rkt run \ --mount=volume=vault-secrets-dir,target={{ vault_secrets_dir }} \ --volume=vault-roles-dir,kind=host,source={{ vault_roles_dir }} \ --mount=volume=vault-roles-dir,target={{ vault_roles_dir }} \ +--volume=vault-etcd-cert-dir,kind=host,source={{ vault_etcd_cert_dir }} \ +--mount=volume=vault-etcd-cert-dir,target={{ vault_etcd_cert_dir }} \ docker://{{ vault_image_repo }}:{{ vault_image_tag }} \ --name={{ vault_container_name }} --net=host \ --caps-retain=CAP_IPC_LOCK \ From 576beaa6a6e46c0697b707c46d0866ccc8f1762f Mon Sep 17 00:00:00 2001 From: Oliver Moser Date: Wed, 30 Aug 2017 15:50:33 +0200 Subject: [PATCH 19/64] Include /opt/bin in PATH for host deployed kubelet on CoreOS (#1591) * Include /opt/bin in PATH for host deployed kubelet on CoreOS * Removing conditional check for CoreOS --- 
roles/kubernetes/node/templates/kubelet.j2 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 14d7e0ddd..3240b5611 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -72,3 +72,5 @@ KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}" {% else %} KUBELET_CLOUDPROVIDER="" {% endif %} + +PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin From 7a98ad50b44341cd87b056c2d8d1ece2391b6074 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Wed, 30 Aug 2017 14:41:09 -0500 Subject: [PATCH 20/64] Fixing CA certificate locations for k8s components --- .../manifests/kube-apiserver.manifest.j2 | 18 ++++++++++++++---- .../kube-controller-manager.manifest.j2 | 18 ++++++++++++------ .../manifests/kube-scheduler.manifest.j2 | 18 ++++++++++++------ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index c19076db3..1032ba482 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -105,9 +105,14 @@ spec: - mountPath: {{ kube_config_dir }} name: kubernetes-config readOnly: true - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: {{ etcd_cert_dir }} name: etcd-certs readOnly: true @@ -120,9 +125,14 @@ spec: - hostPath: path: {{ kube_config_dir }} name: kubernetes-config - - hostPath: - path: /etc/ssl/certs/ - name: ssl-certs-host + - name: ssl-certs-host + hostPath: + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | 
regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - hostPath: path: {{ etcd_cert_dir }} name: etcd-certs diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 44a1c253c..8d08dfeb6 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -70,9 +70,14 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: "{{kube_config_dir}}/ssl" name: etc-kube-ssl readOnly: true @@ -87,11 +92,12 @@ spec: volumes: - name: ssl-certs-host hostPath: -{% if ansible_os_family == 'RedHat' %} - path: /etc/pki/tls -{% else %} - path: /usr/share/ca-certificates -{% endif %} + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - name: etc-kube-ssl hostPath: path: "{{ kube_config_dir }}/ssl" diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 054239b67..e9422d4a1 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -45,9 +45,14 @@ spec: initialDelaySeconds: 30 timeoutSeconds: 10 volumeMounts: - - mountPath: /etc/ssl/certs + - mountPath: /etc/ssl name: ssl-certs-host readOnly: true +{% for dir in ssl_ca_dirs %} + - mountPath: {{ dir }} + name: {{ dir | regex_replace('^/(.*)$', '\\1' ) 
| regex_replace('/', '-') }} + readOnly: true +{% endfor %} - mountPath: "{{ kube_config_dir }}/ssl" name: etc-kube-ssl readOnly: true @@ -57,11 +62,12 @@ spec: volumes: - name: ssl-certs-host hostPath: -{% if ansible_os_family == 'RedHat' %} - path: /etc/pki/tls -{% else %} - path: /usr/share/ca-certificates -{% endif %} + path: /etc/ssl +{% for dir in ssl_ca_dirs %} + - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }} + hostPath: + path: {{ dir }} +{% endfor %} - name: etc-kube-ssl hostPath: path: "{{ kube_config_dir }}/ssl" From 93304e5f58212402c97ef9d5a88f13e306612dc0 Mon Sep 17 00:00:00 2001 From: Julian Poschmann Date: Thu, 31 Aug 2017 11:00:05 +0200 Subject: [PATCH 21/64] Fix calico leaving service behind. (#1599) --- roles/reset/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 064201571..26b6141dd 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -18,6 +18,7 @@ - kubelet - etcd - vault + - calico-node register: services_removed tags: ['services'] From 783924e671fcc57b4f1cfebd2ece9851d69089ec Mon Sep 17 00:00:00 2001 From: sgmitchell Date: Thu, 31 Aug 2017 11:23:24 -0400 Subject: [PATCH 22/64] Change backup handler to only run v2 data backup if snap directory exists (#1594) --- roles/etcd/handlers/backup.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/roles/etcd/handlers/backup.yml b/roles/etcd/handlers/backup.yml index 7ec42f4b6..9be90a5b1 100644 --- a/roles/etcd/handlers/backup.yml +++ b/roles/etcd/handlers/backup.yml @@ -5,6 +5,7 @@ - Refresh Time Fact - Set Backup Directory - Create Backup Directory + - Stat etcd v2 data directory - Backup etcd v2 data - Backup etcd v3 data when: etcd_cluster_is_healthy.rc == 0 @@ -24,7 +25,13 @@ group: root mode: 0600 +- name: Stat etcd v2 data directory + stat: + path: "{{ etcd_data_dir }}/member" + register: etcd_data_dir_member + - name: Backup etcd v2 data + when: 
etcd_data_dir_member.stat.exists command: >- {{ bin_dir }}/etcdctl backup --data-dir {{ etcd_data_dir }} From 8ae77e955e12bb2edc83f8b3433f3842ba364337 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Fri, 1 Sep 2017 01:02:23 -0500 Subject: [PATCH 23/64] Adding in certificate serial numbers to manifests (#1392) --- roles/etcd/tasks/main.yml | 5 +++ roles/etcd/tasks/pre_upgrade.yml | 2 +- .../manifests/kube-apiserver.manifest.j2 | 3 ++ .../kube-controller-manager.manifest.j2 | 3 ++ .../manifests/kube-scheduler.manifest.j2 | 2 ++ .../manifests/kube-proxy.manifest.j2 | 2 ++ roles/kubernetes/secrets/tasks/main.yml | 32 +++++++++++++++++++ roles/vault/tasks/shared/issue_cert.yml | 8 +++++ 8 files changed, 56 insertions(+), 1 deletion(-) diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 6d8388ee8..a21016941 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -13,6 +13,11 @@ - include: upd_ca_trust.yml tags: etcd-secrets +- name: "Gen_certs | Get etcd certificate serials" + shell: "openssl x509 -in {{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem -noout -serial | cut -d= -f2" + register: "node-{{ inventory_hostname }}_serial" + when: inventory_hostname in groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort + - include: "install_{{ etcd_deployment_type }}.yml" when: is_etcd_master tags: upgrade diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml index e86a0d947..c08aee621 100644 --- a/roles/etcd/tasks/pre_upgrade.yml +++ b/roles/etcd/tasks/pre_upgrade.yml @@ -34,7 +34,7 @@ - name: "Pre-upgrade | remove etcd-proxy if it exists" command: "{{ docker_bin_dir }}/docker rm -f {{item}}" - with_items: "{{etcd_proxy_container.stdout_lines}}" + with_items: "{{etcd_proxy_container.stdout_lines|default()}}" - name: "Pre-upgrade | see if etcdctl is installed" stat: diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 
b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 1032ba482..f5dec5589 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -6,6 +6,9 @@ metadata: labels: k8s-app: kube-apiserver kubespray: v2 + annotations: + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 8d08dfeb6..e0ef08fe4 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -5,6 +5,9 @@ metadata: namespace: {{system_namespace}} labels: k8s-app: kube-controller + annotations: + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index e9422d4a1..6353ca102 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -5,6 +5,8 @@ metadata: namespace: {{ system_namespace }} labels: k8s-app: kube-scheduler + annotations: + kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 
index 65feeee65..daf0fcb4f 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -5,6 +5,8 @@ metadata: namespace: {{system_namespace}} labels: k8s-app: kube-proxy + annotations: + kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}" spec: hostNetwork: true {% if kube_version | version_compare('v1.6', '>=') %} diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index d2ce2283d..2a15591df 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -75,5 +75,37 @@ - include: upd_ca_trust.yml tags: k8s-secrets +- name: "Gen_certs | Get certificate serials on kube masters" + shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2" + register: "master_certificate_serials" + with_items: + - "admin-{{ inventory_hostname }}.pem" + - "apiserver.pem" + - "kube-controller-manager.pem" + - "kube-scheduler.pem" + when: inventory_hostname in groups['kube-master'] + +- name: "Gen_certs | set kube master certificate serial facts" + set_fact: + etcd_admin_cert_serial: "{{ master_certificate_serials.results[0].stdout|default() }}" + apiserver_cert_serial: "{{ master_certificate_serials.results[1].stdout|default() }}" + controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}" + scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}" + when: inventory_hostname in groups['kube-master'] + +- name: "Gen_certs | Get certificate serials on kube nodes" + shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2" + register: "node_certificate_serials" + with_items: + - "node-{{ inventory_hostname }}.pem" + - "kube-proxy-{{ inventory_hostname }}.pem" + when: inventory_hostname in groups['k8s-cluster'] + +- name: "Gen_certs | set kube node certificate serial facts" + set_fact: + 
etcd_node_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}" + kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}" + when: inventory_hostname in groups['k8s-cluster'] + - include: gen_tokens.yml tags: k8s-secrets diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index d3dbbd9e8..fa09bfd2b 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -66,3 +66,11 @@ mode: "{{ issue_cert_file_mode | d('0644') }}" owner: "{{ issue_cert_file_owner | d('root') }}" when: issue_cert_copy_ca|default(false) + +- name: issue_cert | Copy certificate serial to all hosts + copy: + content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['serial_number'] }}" + dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" + group: "{{ issue_cert_file_group | d('root' )}}" + mode: "{{ issue_cert_file_mode | d('0640') }}" + owner: "{{ issue_cert_file_owner | d('root') }}" From 702ce446dffae3a17c0a427398986eb38764f2da Mon Sep 17 00:00:00 2001 From: Dann Date: Sun, 3 Sep 2017 03:53:45 -0400 Subject: [PATCH 24/64] Apply ClusterRoleBinding to dnsmasq when rbac_enabled (#1592) * Add RBAC policies to dnsmasq * fix merge conflict * yamllint * use .j2 extension for dnsmasq autoscaler --- roles/dnsmasq/tasks/main.yml | 22 ++++++++++++++++++- ...toscaler.yml => dnsmasq-autoscaler.yml.j2} | 3 +++ .../templates/dnsmasq-clusterrolebinding.yml | 14 ++++++++++++ roles/dnsmasq/templates/dnsmasq-deploy.yml | 1 - .../templates/dnsmasq-serviceaccount.yml | 8 +++++++ 5 files changed, 46 insertions(+), 2 deletions(-) rename roles/dnsmasq/templates/{dnsmasq-autoscaler.yml => dnsmasq-autoscaler.yml.j2} (96%) create mode 100644 roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml create mode 100644 roles/dnsmasq/templates/dnsmasq-serviceaccount.yml diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index
56ec80d98..a06afbdce 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -56,6 +56,26 @@ dest: /etc/dnsmasq.d/01-kube-dns.conf state: link +- name: Create dnsmasq RBAC manifests + template: + src: "{{ item }}" + dest: "{{ kube_config_dir }}/{{ item }}" + with_items: + - "dnsmasq-clusterrolebinding.yml" + - "dnsmasq-serviceaccount.yml" + when: rbac_enabled + delegate_to: "{{ groups['kube-master'][0] }}" + run_once: true + +- name: Apply dnsmasq RBAC manifests + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }}" + with_items: + - "dnsmasq-clusterrolebinding.yml" + - "dnsmasq-serviceaccount.yml" + when: rbac_enabled + delegate_to: "{{ groups['kube-master'][0] }}" + run_once: true + - name: Create dnsmasq manifests template: src: "{{item.file}}" @@ -63,7 +83,7 @@ with_items: - {name: dnsmasq, file: dnsmasq-deploy.yml, type: deployment} - {name: dnsmasq, file: dnsmasq-svc.yml, type: svc} - - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml, type: deployment} + - {name: dnsmasq-autoscaler, file: dnsmasq-autoscaler.yml.j2, type: deployment} register: manifests delegate_to: "{{ groups['kube-master'][0] }}" run_once: true diff --git a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 similarity index 96% rename from roles/dnsmasq/templates/dnsmasq-autoscaler.yml rename to roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 index 85b357950..d9e7b10f3 100644 --- a/roles/dnsmasq/templates/dnsmasq-autoscaler.yml +++ b/roles/dnsmasq/templates/dnsmasq-autoscaler.yml.j2 @@ -31,6 +31,9 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' spec: +{% if rbac_enabled %} + serviceAccountName: dnsmasq +{% endif %} tolerations: - effect: NoSchedule operator: Exists diff --git a/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml 
b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml new file mode 100644 index 000000000..817de877b --- /dev/null +++ b/roles/dnsmasq/templates/dnsmasq-clusterrolebinding.yml @@ -0,0 +1,14 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: dnsmasq + namespace: "{{ system_namespace }}" +subjects: + - kind: ServiceAccount + name: dnsmasq + namespace: "{{ system_namespace}}" +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/roles/dnsmasq/templates/dnsmasq-deploy.yml b/roles/dnsmasq/templates/dnsmasq-deploy.yml index 94b15206b..838471050 100644 --- a/roles/dnsmasq/templates/dnsmasq-deploy.yml +++ b/roles/dnsmasq/templates/dnsmasq-deploy.yml @@ -57,7 +57,6 @@ spec: mountPath: /etc/dnsmasq.d - name: etcdnsmasqdavailable mountPath: /etc/dnsmasq.d-available - volumes: - name: etcdnsmasqd hostPath: diff --git a/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml new file mode 100644 index 000000000..bce8a232f --- /dev/null +++ b/roles/dnsmasq/templates/dnsmasq-serviceaccount.yml @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dnsmasq + namespace: "{{ system_namespace }}" + labels: + kubernetes.io/cluster-service: "true" From a3e6896a43195fa49141a4f00d1bdf0a70ce20f4 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 4 Sep 2017 11:29:40 +0300 Subject: [PATCH 25/64] Add RBAC support for canal (#1604) Refactored how rbac_enabled is set Added RBAC to ubuntu-canal-ha CI job Added rbac for calico policy controller --- .gitlab-ci.yml | 11 +-- .../netchecker-server-deployment.yml.j2 | 4 +- .../network_plugin/canal/tasks/main.yml | 25 ++---- .../calico/defaults/main.yml | 5 ++ .../policy_controller/calico/tasks/main.yml | 49 +++++++++--- .../templates/calico-policy-controller.yml.j2 | 9 ++- .../calico/templates/calico-policy-cr.yml.j2 | 17 ++++ .../calico/templates/calico-policy-crb.yml.j2 
| 13 +++ .../calico/templates/calico-policy-sa.yml.j2 | 8 ++ roles/network_plugin/canal/defaults/main.yml | 5 ++ roles/network_plugin/canal/tasks/main.yml | 24 +++--- ...nal-config.yml.j2 => canal-config.yaml.j2} | 0 .../canal/templates/canal-cr-calico.yml.j2 | 80 +++++++++++++++++++ .../canal/templates/canal-cr-flannel.yml.j2 | 26 ++++++ .../canal/templates/canal-crb-calico.yml.j2 | 14 ++++ .../canal/templates/canal-crb-flannel.yml.j2 | 14 ++++ .../canal/templates/canal-node-sa.yml.j2 | 9 +++ .../{canal-node.yml.j2 => canal-node.yaml.j2} | 7 ++ 18 files changed, 274 insertions(+), 46 deletions(-) create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 create mode 100644 roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 rename roles/network_plugin/canal/templates/{canal-config.yml.j2 => canal-config.yaml.j2} (100%) create mode 100644 roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 create mode 100644 roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 create mode 100644 roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 create mode 100644 roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 create mode 100644 roles/network_plugin/canal/templates/canal-node-sa.yml.j2 rename roles/network_plugin/canal/templates/{canal-node.yml.j2 => canal-node.yaml.j2} (97%) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6a456f9df..17851b19c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -269,9 +269,10 @@ before_script: ##User-data to simply turn off coreos upgrades STARTUP_SCRIPT: 'systemctl disable locksmithd && systemctl stop locksmithd' -.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables +.ubuntu_canal_ha_rbac_variables: &ubuntu_canal_ha_rbac_variables # stage: deploy-gce-part1 KUBE_NETWORK_PLUGIN: canal + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 
'RBAC' ] }" CLOUD_IMAGE: ubuntu-1604-xenial CLOUD_REGION: europe-west1-b CLUSTER_MODE: ha @@ -445,24 +446,24 @@ ubuntu-weave-sep-triggers: only: ['triggers'] # More builds for PRs/merges (manual) and triggers (auto) -ubuntu-canal-ha: +ubuntu-canal-ha-rbac: stage: deploy-gce-part1 <<: *job <<: *gce variables: <<: *gce_variables - <<: *ubuntu_canal_ha_variables + <<: *ubuntu_canal_ha_rbac_variables when: manual except: ['triggers'] only: ['master', /^pr-.*$/] -ubuntu-canal-ha-triggers: +ubuntu-canal-ha-rbac-triggers: stage: deploy-gce-part1 <<: *job <<: *gce variables: <<: *gce_variables - <<: *ubuntu_canal_ha_variables + <<: *ubuntu_canal_ha_rbac_variables when: on_success only: ['triggers'] diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 index c3dbf3cb5..6e2738e6f 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-server-deployment.yml.j2 @@ -25,12 +25,14 @@ spec: memory: {{ netchecker_server_memory_requests }} ports: - containerPort: 8081 - hostPort: 8081 args: - "-v=5" - "-logtostderr" - "-kubeproxyinit" - "-endpoint=0.0.0.0:8081" + tolerations: + - effect: NoSchedule + operator: Exists {% if rbac_enabled %} serviceAccountName: netchecker-server {% endif %} diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index 72956dac9..6f3bb4d85 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -1,20 +1,11 @@ --- -- name: Create canal ConfigMap - run_once: true +- name: Canal | Start Resources kube: - name: "canal-config" + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/canal-config.yaml" - resource: "configmap" - 
namespace: "{{system_namespace}}" - -- name: Start flannel and calico-node - run_once: true - kube: - name: "canal-node" - kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/canal-node.yaml" - resource: "ds" - namespace: "{{system_namespace}}" - state: "{{ item | ternary('latest','present') }}" - with_items: "{{ canal_node_manifest.changed }}" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ canal_manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml index 93d12c901..0e66359cc 100644 --- a/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/defaults/main.yml @@ -8,3 +8,8 @@ calico_policy_controller_memory_requests: 64M # SSL calico_cert_dir: "/etc/calico/certs" canal_cert_dir: "/etc/canal/certs" + +rbac_resources: + - sa + - clusterrole + - clusterrolebinding diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index de102f31d..79bb535b7 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -1,22 +1,49 @@ --- -- set_fact: +- name: Set cert dir + set_fact: calico_cert_dir: "{{ canal_cert_dir }}" when: kube_network_plugin == 'canal' tags: [facts, canal] -- name: Write calico-policy-controller yaml +- name: Get calico-policy-controller version if running + shell: "{{ bin_dir }}/kubectl -n {{ system_namespace }} get rs calico-policy-controller -o=jsonpath='{$.spec.template.spec.containers[:1].image}' | cut -d':' -f2" + register: existing_calico_policy_version + run_once: true + failed_when: false + +# FIXME(mattymo): This should not be 
necessary +- name: Delete calico-policy-controller if an old one is installed + kube: + name: calico-policy-controller + kubectl: "{{bin_dir}}/kubectl" + resource: rs + namespace: "{{ system_namespace }}" + state: absent + run_once: true + when: + - not "NotFound" in existing_calico_policy_version.stderr + - existing_calico_policy_version.stdout | version_compare('v0.7.0', '<') + +- name: Create calico-policy-controller manifests template: - src: calico-policy-controller.yml.j2 - dest: "{{kube_config_dir}}/calico-policy-controller.yml" - when: inventory_hostname == groups['kube-master'][0] - tags: canal + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-policy-controller, file: calico-policy-controller.yml, type: rs} + - {name: calico-policy-controller, file: calico-policy-sa.yml, type: sa} + - {name: calico-policy-controller, file: calico-policy-cr.yml, type: clusterrole} + - {name: calico-policy-controller, file: calico-policy-crb.yml, type: clusterrolebinding} + register: calico_policy_manifests + when: + - rbac_enabled or item.type not in rbac_resources - name: Start of Calico policy controller kube: - name: "calico-policy-controller" + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" kubectl: "{{bin_dir}}/kubectl" - filename: "{{kube_config_dir}}/calico-policy-controller.yml" - namespace: "{{system_namespace}}" - resource: "rs" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ calico_policy_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] - tags: canal diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 index 4722cbc53..ca1711463 100644 --- 
a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-controller.yml.j2 @@ -15,15 +15,18 @@ spec: template: metadata: name: calico-policy-controller - namespace: {{system_namespace}} + namespace: {{ system_namespace }} labels: kubernetes.io/cluster-service: "true" k8s-app: calico-policy spec: hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: calico-policy-controller +{% endif %} tolerations: - - effect: NoSchedule - operator: Exists + - effect: NoSchedule + operator: Exists containers: - name: calico-policy-controller image: {{ calico_policy_image_repo }}:{{ calico_policy_image_tag }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 new file mode 100644 index 000000000..aac341ca6 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-cr.yml.j2 @@ -0,0 +1,17 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} +rules: + - apiGroups: + - "" + - extensions + resources: + - pods + - namespaces + - networkpolicies + verbs: + - watch + - list diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 new file mode 100644 index 000000000..d5c192018 --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-policy-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-policy-controller +subjects: +- kind: ServiceAccount + name: calico-policy-controller + namespace: {{ 
system_namespace }} diff --git a/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 new file mode 100644 index 000000000..c6bc07fbb --- /dev/null +++ b/roles/kubernetes-apps/policy_controller/calico/templates/calico-policy-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-policy-controller + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/canal/defaults/main.yml b/roles/network_plugin/canal/defaults/main.yml index 38696b87a..bf74653c7 100644 --- a/roles/network_plugin/canal/defaults/main.yml +++ b/roles/network_plugin/canal/defaults/main.yml @@ -31,3 +31,8 @@ calicoctl_memory_limit: 170M calicoctl_cpu_limit: 100m calicoctl_memory_requests: 32M calicoctl_cpu_requests: 25m + +rbac_resources: + - sa + - clusterrole + - clusterrolebinding diff --git a/roles/network_plugin/canal/tasks/main.yml b/roles/network_plugin/canal/tasks/main.yml index ea67e20cd..2cc1a8ffe 100644 --- a/roles/network_plugin/canal/tasks/main.yml +++ b/roles/network_plugin/canal/tasks/main.yml @@ -32,16 +32,22 @@ delegate_to: "{{groups['etcd'][0]}}" run_once: true -- name: Canal | Write canal configmap +- name: Canal | Create canal node manifests template: - src: canal-config.yml.j2 - dest: "{{kube_config_dir}}/canal-config.yaml" - -- name: Canal | Write canal node configuration - template: - src: canal-node.yml.j2 - dest: "{{kube_config_dir}}/canal-node.yaml" - register: canal_node_manifest + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: canal-config, file: canal-config.yaml, type: cm} + - {name: canal-node, file: canal-node.yaml, type: ds} + - {name: canal, file: canal-node-sa.yml, type: sa} + - {name: calico, file: canal-cr-calico.yml, type: clusterrole} + - {name: flannel, file: canal-cr-flannel.yml, type: clusterrole} + - 
{name: canal-calico, file: canal-crb-calico.yml, type: clusterrolebinding} + - {name: canal-flannel, file: canal-crb-flannel.yml, type: clusterrolebinding} + register: canal_manifests + when: + - inventory_hostname in groups['kube-master'] + - rbac_enabled or item.type not in rbac_resources - name: Canal | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -ac /opt/cni/bin/ /cnibindir/" diff --git a/roles/network_plugin/canal/templates/canal-config.yml.j2 b/roles/network_plugin/canal/templates/canal-config.yaml.j2 similarity index 100% rename from roles/network_plugin/canal/templates/canal-config.yml.j2 rename to roles/network_plugin/canal/templates/canal-config.yaml.j2 diff --git a/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 new file mode 100644 index 000000000..e3b048c64 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-cr-calico.yml.j2 @@ -0,0 +1,80 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: + - namespaces + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - pods/status + verbs: + - update + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - update + - watch + - apiGroups: ["extensions"] + resources: + - thirdpartyresources + verbs: + - create + - get + - list + - watch + - apiGroups: ["extensions"] + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: ["projectcalico.org"] + resources: + - globalbgppeers + verbs: + - get + - list + - apiGroups: ["projectcalico.org"] + resources: + - globalconfigs + - globalbgpconfigs + verbs: + - create + - get + - list + - update + - watch 
+ - apiGroups: ["projectcalico.org"] + resources: + - ippools + verbs: + - create + - get + - list + - update + - watch + - apiGroups: ["alpha.projectcalico.org"] + resources: + - systemnetworkpolicies + verbs: + - get + - list diff --git a/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 new file mode 100644 index 000000000..0be8e938c --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-cr-flannel.yml.j2 @@ -0,0 +1,26 @@ +--- +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: flannel +rules: + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch diff --git a/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 new file mode 100644 index 000000000..e1c1f5050 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-crb-calico.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the calico ClusterRole to the canal ServiceAccount. +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: canal-calico +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico +subjects: +- kind: ServiceAccount + name: canal + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 new file mode 100644 index 000000000..3b00017b1 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-crb-flannel.yml.j2 @@ -0,0 +1,14 @@ +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. 
+kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: canal-flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 new file mode 100644 index 000000000..d5b9a6e97 --- /dev/null +++ b/roles/network_plugin/canal/templates/canal-node-sa.yml.j2 @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" + diff --git a/roles/network_plugin/canal/templates/canal-node.yml.j2 b/roles/network_plugin/canal/templates/canal-node.yaml.j2 similarity index 97% rename from roles/network_plugin/canal/templates/canal-node.yml.j2 rename to roles/network_plugin/canal/templates/canal-node.yaml.j2 index cd9312832..972b02d5f 100644 --- a/roles/network_plugin/canal/templates/canal-node.yml.j2 +++ b/roles/network_plugin/canal/templates/canal-node.yaml.j2 @@ -19,6 +19,9 @@ spec: k8s-app: canal-node spec: hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: canal +{% endif %} tolerations: - effect: NoSchedule operator: Exists @@ -169,6 +172,10 @@ spec: configMapKeyRef: name: canal-config key: etcd_keyfile + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName securityContext: privileged: true volumeMounts: From 77602dbb930335e9d323bf2fd3407170ce1071c7 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 4 Sep 2017 11:29:51 +0300 Subject: [PATCH 26/64] Move calico to daemonset (#1605) * Drop legacy calico logic * add calico as a daemonset --- .gitlab-ci.yml | 1 + .../network_plugin/calico/tasks/main.yml | 10 ++ .../network_plugin/meta/main.yml | 3 + .../node/templates/kubelet.docker.service.j2 | 5 - .../node/templates/kubelet.host.service.j2 | 5 - 
.../node/templates/kubelet.rkt.service.j2 | 5 - roles/network_plugin/calico/defaults/main.yml | 5 + roles/network_plugin/calico/tasks/main.yml | 127 +++----------- .../calico/templates/calico-config.yml.j2 | 19 ++ .../calico/templates/calico-cr.yml.j2 | 13 ++ .../calico/templates/calico-crb.yml.j2 | 13 ++ .../calico/templates/calico-node-sa.yml.j2 | 8 + .../calico/templates/calico-node.service.j2 | 41 ----- .../templates/calico-node.service.legacy.j2 | 19 -- .../calico/templates/calico-node.yml.j2 | 166 ++++++++++++++++++ .../calico/templates/calico.env.j2 | 20 --- .../calico/templates/cni-calico.conf.j2 | 2 - 17 files changed, 262 insertions(+), 200 deletions(-) create mode 100644 roles/kubernetes-apps/network_plugin/calico/tasks/main.yml create mode 100644 roles/network_plugin/calico/templates/calico-config.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-cr.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-crb.yml.j2 create mode 100644 roles/network_plugin/calico/templates/calico-node-sa.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-node.service.j2 delete mode 100644 roles/network_plugin/calico/templates/calico-node.service.legacy.j2 create mode 100644 roles/network_plugin/calico/templates/calico-node.yml.j2 delete mode 100644 roles/network_plugin/calico/templates/calico.env.j2 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 17851b19c..864cadde5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -259,6 +259,7 @@ before_script: # Test matrix. Leave the comments for markup scripts. 
.coreos_calico_sep_variables: &coreos_calico_sep_variables # stage: deploy-gce-part1 + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" KUBE_NETWORK_PLUGIN: calico CLOUD_IMAGE: coreos-stable-1465-6-0-v20170817 CLOUD_REGION: us-west1-b diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml new file mode 100644 index 000000000..5061c5c98 --- /dev/null +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Start Calico resources + kube: + name: "{{item.item.name}}" + namespace: "{{ system_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "{{item.changed | ternary('latest','present') }}" + with_items: "{{ calico_node_manifests.results }}" diff --git a/roles/kubernetes-apps/network_plugin/meta/main.yml b/roles/kubernetes-apps/network_plugin/meta/main.yml index 18c786c1d..4df295ea4 100644 --- a/roles/kubernetes-apps/network_plugin/meta/main.yml +++ b/roles/kubernetes-apps/network_plugin/meta/main.yml @@ -1,5 +1,8 @@ --- dependencies: + - role: kubernetes-apps/network_plugin/calico + when: kube_network_plugin == 'calico' + tags: calico - role: kubernetes-apps/network_plugin/canal when: kube_network_plugin == 'canal' tags: canal diff --git a/roles/kubernetes/node/templates/kubelet.docker.service.j2 b/roles/kubernetes/node/templates/kubelet.docker.service.j2 index cf79f6fa4..16d8a63d1 100644 --- a/roles/kubernetes/node/templates/kubelet.docker.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.docker.service.j2 @@ -1,13 +1,8 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service docker.socket calico-node.service -Wants=docker.socket calico-node.service -{% else %} After=docker.service 
Wants=docker.socket -{% endif %} [Service] EnvironmentFile={{kube_config_dir}}/kubelet.env diff --git a/roles/kubernetes/node/templates/kubelet.host.service.j2 b/roles/kubernetes/node/templates/kubelet.host.service.j2 index 71a9da8c3..ec5e3d524 100644 --- a/roles/kubernetes/node/templates/kubelet.host.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.host.service.j2 @@ -1,13 +1,8 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service docker.socket calico-node.service -Wants=docker.socket calico-node.service -{% else %} After=docker.service Wants=docker.socket -{% endif %} [Service] EnvironmentFile={{kube_config_dir}}/kubelet.env diff --git a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 index 592d70c2b..522f58d8c 100644 --- a/roles/kubernetes/node/templates/kubelet.rkt.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.rkt.service.j2 @@ -1,12 +1,7 @@ [Unit] Description=Kubernetes Kubelet Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=calico-node.service -Wants=network.target calico-node.service -{% else %} Wants=network.target -{% endif %} [Service] Restart=on-failure diff --git a/roles/network_plugin/calico/defaults/main.yml b/roles/network_plugin/calico/defaults/main.yml index e09ab3e1e..148a28082 100644 --- a/roles/network_plugin/calico/defaults/main.yml +++ b/roles/network_plugin/calico/defaults/main.yml @@ -34,3 +34,8 @@ calicoctl_cpu_requests: 50m # Should calico ignore kernel's RPF check setting, # see https://github.com/projectcalico/felix/blob/ab8799eaea66627e5db7717e62fca61fd9c08646/python/calico/felix/config.py#L198 calico_node_ignorelooserpf: false + +rbac_resources: + - sa + - clusterrole + - clusterrolebinding diff 
--git a/roles/network_plugin/calico/tasks/main.yml b/roles/network_plugin/calico/tasks/main.yml index 38d3ad5db..7ea77d053 100644 --- a/roles/network_plugin/calico/tasks/main.yml +++ b/roles/network_plugin/calico/tasks/main.yml @@ -1,9 +1,10 @@ --- -- name: Calico | Check calicoctl version - run_once: true - set_fact: - legacy_calicoctl: "{{ calicoctl_image_tag | version_compare('v1.0.0', '<') }}" - tags: facts +- name: Calico | Disable calico-node service if it exists + service: + name: calico-node + state: stopped + enabled: yes + failed_when: false - name: Calico | Write Calico cni config template: @@ -38,7 +39,6 @@ owner: root group: root changed_when: false - notify: restart calico-node - name: Calico | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -ac /opt/cni/bin/ /cnibindir/" @@ -103,38 +103,7 @@ environment: NO_DEFAULT_POOLS: true run_once: true - when: not legacy_calicoctl and - ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) - -- name: Calico (old) | Define ipip pool argument - run_once: true - set_fact: - ipip_arg: "--ipip" - when: (legacy_calicoctl and ipip ) - tags: facts - -- name: Calico (old) | Define nat-outgoing pool argument - run_once: true - set_fact: - nat_arg: "--nat-outgoing" - when: (legacy_calicoctl and - nat_outgoing|default(false) and not peer_with_router|default(false)) - tags: facts - -- name: Calico (old) | Define calico pool task name - run_once: true - set_fact: - pool_task_name: "with options {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" - when: (legacy_calicoctl and ipip_arg|default(false) or nat_arg|default(false)) - tags: facts - -- name: Calico (old) | Configure calico network pool {{ pool_task_name|default('') }} - command: "{{ bin_dir}}/calicoctl pool add {{ kube_pods_subnet }} {{ ipip_arg|default('') }} {{ nat_arg|default('') }}" - environment: - 
NO_DEFAULT_POOLS: true - run_once: true - when: legacy_calicoctl and - ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) + when: ("Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout) - name: Calico | Get calico configuration from etcd command: |- @@ -162,52 +131,11 @@ - name: Calico | Set global as_num command: "{{ bin_dir}}/calicoctl config set asNumber {{ global_as_num }}" run_once: true - when: not legacy_calicoctl - -- name: Calico (old) | Set global as_num - command: "{{ bin_dir}}/calicoctl bgp default-node-as {{ global_as_num }}" - run_once: true - when: legacy_calicoctl - -- name: Calico (old) | Write calico-node systemd init file - template: - src: calico-node.service.legacy.j2 - dest: /etc/systemd/system/calico-node.service - when: legacy_calicoctl - notify: restart calico-node - -- name: Calico | Write calico.env for systemd init file - template: - src: calico.env.j2 - dest: /etc/calico/calico.env - when: not legacy_calicoctl - notify: restart calico-node - -- name: Calico | Write calico-node systemd init file - template: - src: calico-node.service.j2 - dest: /etc/systemd/system/calico-node.service - when: not legacy_calicoctl - notify: restart calico-node - -- name: Calico | Restart calico-node if secrets changed - command: /bin/true - when: secret_changed|default(false) or etcd_secret_changed|default(false) - notify: restart calico-node - -- meta: flush_handlers - -- name: Calico | Enable calico-node - service: - name: calico-node - state: started - enabled: yes - name: Calico | Disable node mesh shell: "{{ bin_dir }}/calicoctl config set nodeToNodeMesh off" when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false)) - and inventory_hostname in groups['k8s-cluster'] - and not legacy_calicoctl) + and inventory_hostname in groups['k8s-cluster']) run_once: true - name: Calico | Configure peering with router(s) @@ -220,8 +148,7 @@ }' | {{ bin_dir }}/calicoctl create --skip-exists -f - 
with_items: "{{ peers|default([]) }}" - when: (not legacy_calicoctl and - peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']) + when: peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster'] - name: Calico | Configure peering with route reflectors shell: > @@ -235,26 +162,20 @@ }' | {{ bin_dir }}/calicoctl create --skip-exists -f - with_items: "{{ groups['calico-rr'] | default([]) }}" - when: (not legacy_calicoctl and - peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] + when: (peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] and hostvars[item]['cluster_id'] == cluster_id) -- name: Calico (old) | Disable node mesh - shell: "{{ bin_dir }}/calicoctl bgp node-mesh off" - when: ((peer_with_router|default(false) or peer_with_calico_rr|default(false)) - and inventory_hostname in groups['k8s-cluster'] - and legacy_calicoctl) - run_once: true - -- name: Calico (old) | Configure peering with router(s) - shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}" - with_items: "{{ peers|default([]) }}" - when: (legacy_calicoctl and - peer_with_router|default(false) and inventory_hostname in groups['k8s-cluster']) - -- name: Calico (old) | Configure peering with route reflectors - shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ hostvars[item]['calico_rr_ip']|default(hostvars[item]['ip'])|default(hostvars[item]['ansible_default_ipv4.address']) }} as {{ local_as | default(global_as_num) }}" - with_items: "{{ groups['calico-rr'] | default([]) }}" - when: (legacy_calicoctl and - peer_with_calico_rr|default(false) and inventory_hostname in groups['k8s-cluster'] - and hostvars[item]['cluster_id'] == cluster_id) +- name: Calico | Create calico manifests + template: + src: "{{item.file}}.j2" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {name: calico-config, file: calico-config.yml, type: cm} + - {name: calico-node, 
file: calico-node.yml, type: ds} + - {name: calico, file: calico-node-sa.yml, type: sa} + - {name: calico, file: calico-cr.yml, type: clusterrole} + - {name: calico, file: calico-crb.yml, type: clusterrolebinding} + register: calico_node_manifests + when: + - inventory_hostname in groups['kube-master'] + - rbac_enabled or item.type not in rbac_resources diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 new file mode 100644 index 000000000..a4207f1dc --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -0,0 +1,19 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: {{ system_namespace }} +data: + etcd_endpoints: "{{ etcd_access_endpoint }}" + etcd_ca: "/calico-secrets/ca_cert.crt" + etcd_cert: "/calico-secrets/cert.crt" + etcd_key: "/calico-secrets/key.pem" +{% if calico_network_backend is defined and calico_network_backend == 'none' %} + cluster_type: "kubespray" +{%- else %} + cluster_type: "kubespray,bgp" +{% endif %} + calico_backend: "bird" + {%- if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false) %} + as: "{{ local_as }}" + {% endif -%} diff --git a/roles/network_plugin/calico/templates/calico-cr.yml.j2 b/roles/network_plugin/calico/templates/calico-cr.yml.j2 new file mode 100644 index 000000000..47d626659 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-cr.yml.j2 @@ -0,0 +1,13 @@ +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-node + namespace: {{ system_namespace }} +rules: + - apiGroups: [""] + resources: + - pods + - nodes + verbs: + - get diff --git a/roles/network_plugin/calico/templates/calico-crb.yml.j2 b/roles/network_plugin/calico/templates/calico-crb.yml.j2 new file mode 100644 index 000000000..2e132a0dc --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-crb.yml.j2 @@ -0,0 +1,13 @@ +--- +apiVersion: 
rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: +- kind: ServiceAccount + name: calico-node + namespace: {{ system_namespace }} diff --git a/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 new file mode 100644 index 000000000..5cce29793 --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node-sa.yml.j2 @@ -0,0 +1,8 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: {{ system_namespace }} + labels: + kubernetes.io/cluster-service: "true" diff --git a/roles/network_plugin/calico/templates/calico-node.service.j2 b/roles/network_plugin/calico/templates/calico-node.service.j2 deleted file mode 100644 index 73bb757ba..000000000 --- a/roles/network_plugin/calico/templates/calico-node.service.j2 +++ /dev/null @@ -1,41 +0,0 @@ -[Unit] -Description=calico-node -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=-{{ docker_bin_dir }}/docker rm -f calico-node -ExecStart={{ docker_bin_dir }}/docker run --net=host --privileged \ - --name=calico-node \ - -e HOSTNAME=${CALICO_HOSTNAME} \ - -e IP=${CALICO_IP} \ - -e IP6=${CALICO_IP6} \ - -e CLUSTER_TYPE=${CLUSTER_TYPE} \ - -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \ - -e FELIX_DEFAULTENDPOINTTOHOSTACTION={{ calico_endpoint_to_host_action|default('RETURN') }} \ - -e AS=${CALICO_AS} \ - -e NO_DEFAULT_POOLS=${CALICO_NO_DEFAULT_POOLS} \ - -e CALICO_LIBNETWORK_ENABLED=${CALICO_LIBNETWORK_ENABLED} \ - -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \ - -e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \ - -e ETCD_CERT_FILE=${ETCD_CERT_FILE} \ - -e ETCD_KEY_FILE=${ETCD_KEY_FILE} \ -{% if calico_node_ignorelooserpf %} - -e FELIX_IGNORELOOSERPF=true \ -{% endif %} - -v /var/log/calico:/var/log/calico \ - -v 
/run/docker/plugins:/run/docker/plugins \ - -v /lib/modules:/lib/modules \ - -v /var/run/calico:/var/run/calico \ - -v {{ calico_cert_dir }}:{{ calico_cert_dir }}:ro \ - --memory={{ calico_node_memory_limit|regex_replace('Mi', 'M') }} --cpu-shares={{ calico_node_cpu_limit|regex_replace('m', '') }} \ - {{ calico_node_image_repo }}:{{ calico_node_image_tag }} - -Restart=always -RestartSec=10s - -ExecStop=-{{ docker_bin_dir }}/docker stop calico-node - -[Install] -WantedBy=multi-user.target diff --git a/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 b/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 deleted file mode 100644 index f542f64f6..000000000 --- a/roles/network_plugin/calico/templates/calico-node.service.legacy.j2 +++ /dev/null @@ -1,19 +0,0 @@ -[Unit] -Description=Calico per-node agent -Documentation=https://github.com/projectcalico/calico-docker -After=docker.service docker.socket -Wants=docker.socket - -[Service] -User=root -PermissionsStartOnly=true -{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%} -ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --as={{ local_as }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }} -{% else %} -ExecStart={{ bin_dir }}/calicoctl node --ip={{ip | default(ansible_default_ipv4.address) }} --detach=false --node-image={{ calico_node_image_repo }}:{{ calico_node_image_tag }} -{% endif %} -Restart=always -RestartSec=10s - -[Install] -WantedBy=multi-user.target diff --git a/roles/network_plugin/calico/templates/calico-node.yml.j2 b/roles/network_plugin/calico/templates/calico-node.yml.j2 new file mode 100644 index 000000000..9f47d468a --- /dev/null +++ b/roles/network_plugin/calico/templates/calico-node.yml.j2 @@ -0,0 +1,166 @@ +--- +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in 
a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: {{ system_namespace }} + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + kubespray.etcd-cert/serial: "{{ etcd_node_cert_serial }}" + spec: + hostNetwork: true +{% if rbac_enabled %} + serviceAccountName: calico-node +{% endif %} + tolerations: + - effect: NoSchedule + operator: Exists + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }} + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + valueFrom: + configMapKeyRef: + name: calico-config + key: cluster_type + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{ calico_endpoint_to_host_action|default('RETURN') }}" +# should be set in etcd before deployment +# # Configure the IP Pool from which Pod IPs will be chosen. +# - name: CALICO_IPV4POOL_CIDR +# value: "192.168.0.0/16" +# - name: CALICO_IPV4POOL_IPIP +# value: "always" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Disable autocreation of pools + - name: CALICO_NO_DEFAULT_POOLS + value: "true" + # Enable libnetwork + - name: CALICO_LIBNETWORK_ENABLED + value: "true" + # Set MTU for tunnel device used if ipip is enabled +{% if calico_mtu is defined %} + - name: FELIX_IPINIPMTU + value: "{{ calico_mtu }}" +{% endif %} + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + - name: IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + limits: + cpu: {{ calico_node_cpu_limit }} + memory: {{ calico_node_memory_limit }} + requests: + cpu: {{ calico_node_cpu_requests }} + memory: {{ calico_node_memory_requests }} + livenessProbe: + httpGet: + path: /liveness + port: 9099 + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + # Used to install CNI. 
+ - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets. + - name: etcd-certs + hostPath: + path: "{{ calico_cert_dir }}" + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + diff --git a/roles/network_plugin/calico/templates/calico.env.j2 b/roles/network_plugin/calico/templates/calico.env.j2 deleted file mode 100644 index e438060af..000000000 --- a/roles/network_plugin/calico/templates/calico.env.j2 +++ /dev/null @@ -1,20 +0,0 @@ -ETCD_ENDPOINTS="{{ etcd_access_endpoint }}" -ETCD_CA_CERT_FILE="{{ calico_cert_dir }}/ca_cert.crt" -ETCD_CERT_FILE="{{ calico_cert_dir }}/cert.crt" -ETCD_KEY_FILE="{{ calico_cert_dir }}/key.pem" -CALICO_IP="{{ip | default(ansible_default_ipv4.address) }}" -CALICO_IP6="" -{% if calico_network_backend is defined and calico_network_backend == 'none' %} -CLUSTER_TYPE="kubespray" -{% else %} -CLUSTER_TYPE="kubespray,bgp" -{% endif %} -{% if calico_network_backend is defined %} -CALICO_NETWORKING_BACKEND="{{calico_network_backend }}" -{% endif %} -{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false)%} -CALICO_AS="{{ local_as }}" -{% endif %} -CALICO_NO_DEFAULT_POOLS="true" -CALICO_LIBNETWORK_ENABLED="true" -CALICO_HOSTNAME="{{ ansible_hostname }}" diff --git a/roles/network_plugin/calico/templates/cni-calico.conf.j2 b/roles/network_plugin/calico/templates/cni-calico.conf.j2 index 7cd3c902d..2b8d5b17c 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conf.j2 +++ b/roles/network_plugin/calico/templates/cni-calico.conf.j2 @@ -1,8 +1,6 @@ { "name": "calico-k8s-network", -{% if not legacy_calicoctl %} "hostname": "{{ ansible_hostname }}", -{% endif %} "type": "calico", "etcd_endpoints": "{{ etcd_access_endpoint }}", "etcd_cert_file": "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem", From 660282e82f31b36dc0262d32281dcece982e545a Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: 
Mon, 4 Sep 2017 11:30:01 +0300 Subject: [PATCH 27/64] Make daemonsets upgradeable (#1606) Canal will be covered by a separate PR --- roles/kubernetes-apps/ansible/tasks/netchecker.yml | 12 ------------ .../ansible/templates/netchecker-agent-ds.yml.j2 | 5 +++++ .../templates/netchecker-agent-hostnet-ds.yml.j2 | 4 ++++ .../efk/fluentd/templates/fluentd-ds.yml.j2 | 1 - .../network_plugin/weave/templates/weave-net.yml.j2 | 5 +++++ 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index ca8535c2a..208adedc2 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -25,18 +25,6 @@ state: absent when: inventory_hostname == groups['kube-master'][0] -# FIXME: remove if kubernetes/features#124 is implemented -- name: Kubernetes Apps | Purge old Netchecker daemonsets - kube: - name: "{{item.item.name}}" - namespace: "{{netcheck_namespace}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "{{item.item.type}}" - filename: "{{kube_config_dir}}/{{item.item.file}}" - state: absent - with_items: "{{ manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] and item.item.type == "ds" and item.changed - - name: Kubernetes Apps | Start Netchecker Resources kube: name: "{{item.item.name}}" diff --git a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 index 8b16e0c30..d73004242 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-ds.yml.j2 @@ -40,3 +40,8 @@ spec: requests: cpu: {{ netchecker_agent_cpu_requests }} memory: {{ netchecker_agent_memory_requests }} + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + diff --git 
a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 index 6064d8e68..70194c900 100644 --- a/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/netchecker-agent-hostnet-ds.yml.j2 @@ -44,3 +44,7 @@ spec: requests: cpu: {{ netchecker_agent_cpu_requests }} memory: {{ netchecker_agent_memory_requests }} + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate diff --git a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 index 838ebf1e6..2dc26991c 100644 --- a/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 +++ b/roles/kubernetes-apps/efk/fluentd/templates/fluentd-ds.yml.j2 @@ -58,4 +58,3 @@ spec: {% if rbac_enabled %} serviceAccountName: efk {% endif %} - diff --git a/roles/network_plugin/weave/templates/weave-net.yml.j2 b/roles/network_plugin/weave/templates/weave-net.yml.j2 index 691b4cf02..c61f2e7e4 100644 --- a/roles/network_plugin/weave/templates/weave-net.yml.j2 +++ b/roles/network_plugin/weave/templates/weave-net.yml.j2 @@ -154,3 +154,8 @@ items: - name: lib-modules hostPath: path: /lib/modules + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + From fc7905653e32b7e951a7a36f287b827fd537fe0d Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 4 Sep 2017 11:30:18 +0300 Subject: [PATCH 28/64] Add socat for CoreOS when using host deploy kubelet (#1575) --- roles/download/defaults/main.yml | 7 +++++++ roles/kubernetes/node/meta/main.yml | 6 +++++- roles/kubernetes/node/tasks/install_host.yml | 6 ++++++ 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 25a335915..2c4000866 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -60,6 +60,8 @@ 
hyperkube_image_repo: "quay.io/coreos/hyperkube" hyperkube_image_tag: "{{ kube_version }}_coreos.0" pod_infra_image_repo: "gcr.io/google_containers/pause-amd64" pod_infra_image_tag: "{{ pod_infra_version }}" +install_socat_image_repo: "xueshanf/install-socat" +install_socat_image_tag: "latest" netcheck_version: "v1.0" netcheck_agent_img_repo: "quay.io/l23network/k8s-netchecker-agent" netcheck_agent_tag: "{{ netcheck_version }}" @@ -194,6 +196,11 @@ downloads: repo: "{{ pod_infra_image_repo }}" tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" + install_socat: + container: true + repo: "{{ install_socat_image_repo }}" + tag: "{{ install_socat_image_tag }}" + sha256: "{{ install_socat_digest_checksum|default(None) }}" nginx: container: true repo: "{{ nginx_image_repo }}" diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml index 12a7d73b7..f0656e571 100644 --- a/roles/kubernetes/node/meta/main.yml +++ b/roles/kubernetes/node/meta/main.yml @@ -6,6 +6,10 @@ dependencies: - role: download file: "{{ downloads.pod_infra }}" tags: [download, kubelet] + - role: download + file: "{{ downloads.install_socat }}" + tags: [download, kubelet] + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] - role: kubernetes/secrets tags: k8s-secrets - role: download @@ -33,4 +37,4 @@ dependencies: tags: [download, dnsmasq] - role: download file: "{{ downloads.kubednsautoscaler }}" - tags: [download, dnsmasq] \ No newline at end of file + tags: [download, dnsmasq] diff --git a/roles/kubernetes/node/tasks/install_host.yml b/roles/kubernetes/node/tasks/install_host.yml index e80b20498..9f1523ffe 100644 --- a/roles/kubernetes/node/tasks/install_host.yml +++ b/roles/kubernetes/node/tasks/install_host.yml @@ -8,3 +8,9 @@ changed_when: false tags: [hyperkube, upgrade] notify: restart kubelet + +- name: install | Copy socat wrapper for Container Linux + command: "{{ docker_bin_dir }}/docker run --rm -v {{ 
bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}" + args: + creates: "{{ bin_dir }}/socat" + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] From d279d145d57fc5f8afe311cada45488e4f18e663 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Tue, 5 Sep 2017 08:23:12 +0300 Subject: [PATCH 29/64] Fix non-rbac deployment of resources as a list (#1613) * Use kubectl apply instead of create/replace Disable checks for existing resources to speed up execution. * Fix non-rbac deployment of resources as a list * Fix autoscaler tolerations field * set all kube resources to state=latest * Update netchecker and weave --- library/kube.py | 1 - roles/dnsmasq/tasks/main.yml | 2 +- roles/kubernetes-apps/ansible/tasks/main.yml | 8 +++++--- roles/kubernetes-apps/ansible/tasks/netchecker.yml | 5 ++--- .../ansible/templates/kubedns-autoscaler.yml.j2 | 10 +++------- roles/kubernetes-apps/efk/kibana/tasks/main.yml | 4 ++-- roles/kubernetes-apps/helm/tasks/main.yml | 3 +-- .../network_plugin/calico/tasks/main.yml | 3 ++- .../network_plugin/canal/tasks/main.yml | 2 +- .../network_plugin/flannel/tasks/main.yml | 4 ++-- .../network_plugin/weave/tasks/main.yml | 3 +-- .../policy_controller/calico/tasks/main.yml | 4 ++-- 12 files changed, 22 insertions(+), 27 deletions(-) diff --git a/library/kube.py b/library/kube.py index fdc783fff..77f7e6e35 100644 --- a/library/kube.py +++ b/library/kube.py @@ -270,7 +270,6 @@ def main(): manager = KubeManager(module) state = module.params.get('state') - if state == 'present': result = manager.create() diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index a06afbdce..607e6df51 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -95,7 +95,7 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ 
manifests.results }}" delegate_to: "{{ groups['kube-master'][0] }}" run_once: true diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 4f9b6ef1d..3c986970c 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -51,10 +51,12 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] + when: + - dns_mode != 'none' + - inventory_hostname == groups['kube-master'][0] + - not item|skipped tags: dnsmasq - name: Kubernetes Apps | Netchecker diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index 208adedc2..a74a4dc87 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -32,7 +32,6 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 index fb87d5a50..df92ee615 100644 --- a/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 +++ b/roles/kubernetes-apps/ansible/templates/kubedns-autoscaler.yml.j2 @@ -27,17 +27,13 @@ spec: metadata: 
labels: k8s-app: kubedns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' spec: + tolerations: + - effect: NoSchedule + operator: Exists containers: - name: autoscaler image: "{{ kubednsautoscaler_image_repo }}:{{ kubednsautoscaler_image_tag }}" - tolerations: - - effect: NoSchedule - operator: Exists - - effect: CriticalAddonsOnly - operator: exists resources: requests: cpu: "20m" diff --git a/roles/kubernetes-apps/efk/kibana/tasks/main.yml b/roles/kubernetes-apps/efk/kibana/tasks/main.yml index 4c14d1945..ea8568286 100644 --- a/roles/kubernetes-apps/efk/kibana/tasks/main.yml +++ b/roles/kubernetes-apps/efk/kibana/tasks/main.yml @@ -12,7 +12,7 @@ name: "kibana-logging" namespace: "{{system_namespace}}" resource: "deployment" - state: "{{ item | ternary('latest','present') }}" + state: "latest" with_items: "{{ kibana_deployment_manifest.changed }}" run_once: true @@ -29,6 +29,6 @@ name: "kibana-logging" namespace: "{{system_namespace}}" resource: "svc" - state: "{{ item | ternary('latest','present') }}" + state: "latest" with_items: "{{ kibana_service_manifest.changed }}" run_once: true diff --git a/roles/kubernetes-apps/helm/tasks/main.yml b/roles/kubernetes-apps/helm/tasks/main.yml index 2d26c5a0f..d01211e2f 100644 --- a/roles/kubernetes-apps/helm/tasks/main.yml +++ b/roles/kubernetes-apps/helm/tasks/main.yml @@ -27,9 +27,8 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ manifests.results }}" - failed_when: manifests|failed and "Error from server (AlreadyExists)" not in manifests.msg when: dns_mode != 'none' and inventory_hostname == groups['kube-master'][0] and rbac_enabled - name: Helm | Install/upgrade helm diff --git a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml index 5061c5c98..f17e45c7a 100644 --- 
a/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/calico/tasks/main.yml @@ -6,5 +6,6 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ calico_node_manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] and not item|skipped diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index 6f3bb4d85..24607249f 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -6,6 +6,6 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ canal_manifests.results }}" when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml index cfe931375..607c7d617 100644 --- a/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/flannel/tasks/main.yml @@ -11,7 +11,7 @@ filename: "{{ kube_config_dir }}/cni-flannel.yml" resource: "ds" namespace: "{{system_namespace}}" - state: "{{ item | ternary('latest','present') }}" + state: "latest" with_items: "{{ flannel_manifest.changed }}" when: inventory_hostname == groups['kube-master'][0] @@ -19,4 +19,4 @@ wait_for: path: /run/flannel/subnet.env delay: 5 - timeout: 600 \ No newline at end of file + timeout: 600 diff --git a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml index c25702b44..3b01d0e66 100644 --- a/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml +++ 
b/roles/kubernetes-apps/network_plugin/weave/tasks/main.yml @@ -17,8 +17,7 @@ filename: "{{ kube_config_dir }}/weave-net.yml" resource: "ds" namespace: "{{system_namespace}}" - state: "{{ item | ternary('latest','present') }}" - with_items: "{{ weave_manifest.changed }}" + state: "latest" when: inventory_hostname == groups['kube-master'][0] - name: "Weave | wait for weave to become available" diff --git a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml index 79bb535b7..a6b1e18c1 100644 --- a/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml +++ b/roles/kubernetes-apps/policy_controller/calico/tasks/main.yml @@ -44,6 +44,6 @@ kubectl: "{{bin_dir}}/kubectl" resource: "{{item.item.type}}" filename: "{{kube_config_dir}}/{{item.item.file}}" - state: "{{item.changed | ternary('latest','present') }}" + state: "latest" with_items: "{{ calico_policy_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube-master'][0] and not item|skipped From c77d11f1c73f6a4741116e92b6a8a2cfda60e171 Mon Sep 17 00:00:00 2001 From: ArthurMa <4406arthur@gmail.com> Date: Tue, 5 Sep 2017 13:35:14 +0800 Subject: [PATCH 30/64] Bugfix (#1616) lost executable path --- roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml index de514b563..8abbe2317 100644 --- a/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml +++ b/roles/kubernetes-apps/efk/elasticsearch/tasks/main.yml @@ -10,7 +10,7 @@ when: rbac_enabled - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)" - command: "kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}" + command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace 
}}" with_items: - "efk-sa.yml" - "efk-clusterrolebinding.yml" From bf0af1cd3d1c7648b4b731b9dc46aa4f053641fd Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Fri, 1 Sep 2017 22:51:37 +0300 Subject: [PATCH 31/64] Vault role updates: * using separated vault roles for generate certs with different `O` (Organization) subject field; * configure vault roles for issuing certificates with different `CN` (Common name) subject field; * set `CN` and `O` to `kubernetes` and `etcd` certificates; * vault/defaults vars definition was simplified; * vault dirs variables defined in kubernetes-defaults foles for using shared tasks in etcd and kubernetes/secrets roles; * upgrade vault to 0.8.1; * generate random vault user password for each role by default; * fix `serial` file name for vault certs; * move vault auth request to issue_cert tasks; * enable `RBAC` in vault CI; --- .gitlab-ci.yml | 1 + roles/etcd/tasks/gen_certs_vault.yml | 41 +--- .../secrets/tasks/gen_certs_vault.yml | 57 ++---- .../secrets/tasks/sync_kube_master_certs.yml | 6 +- roles/kubespray-defaults/defaults/main.yaml | 7 + roles/vault/defaults/main.yml | 187 ++++++++++++------ .../tasks/bootstrap/create_etcd_role.yml | 17 -- roles/vault/tasks/bootstrap/create_mounts.yml | 12 ++ roles/vault/tasks/bootstrap/create_roles.yml | 10 + .../vault/tasks/bootstrap/gen_vault_certs.yml | 20 +- roles/vault/tasks/bootstrap/main.yml | 80 +++----- roles/vault/tasks/cluster/create_mounts.yml | 13 ++ roles/vault/tasks/cluster/create_roles.yml | 14 +- roles/vault/tasks/cluster/main.yml | 44 ++--- roles/vault/tasks/shared/create_role.yml | 5 +- roles/vault/tasks/shared/gen_ca.yml | 4 +- roles/vault/tasks/shared/gen_userpass.yml | 1 - roles/vault/tasks/shared/issue_cert.yml | 45 ++++- 18 files changed, 283 insertions(+), 281 deletions(-) delete mode 100644 roles/vault/tasks/bootstrap/create_etcd_role.yml create mode 100644 roles/vault/tasks/bootstrap/create_mounts.yml create mode 100644 roles/vault/tasks/bootstrap/create_roles.yml 
create mode 100644 roles/vault/tasks/cluster/create_mounts.yml diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 864cadde5..465b7ac57 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -366,6 +366,7 @@ before_script: .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables # stage: deploy-gce-part1 + AUTHORIZATION_MODES: "{ 'authorization_modes': [ 'RBAC' ] }" KUBE_NETWORK_PLUGIN: canal CERT_MGMT: vault CLOUD_IMAGE: ubuntu-1604-xenial diff --git a/roles/etcd/tasks/gen_certs_vault.yml b/roles/etcd/tasks/gen_certs_vault.yml index 4f27eff86..0048a7003 100644 --- a/roles/etcd/tasks/gen_certs_vault.yml +++ b/roles/etcd/tasks/gen_certs_vault.yml @@ -7,51 +7,14 @@ when: inventory_hostname in etcd_node_cert_hosts tags: etcd-secrets -- name: gen_certs_vault | Read in the local credentials - command: cat /etc/vault/roles/etcd/userpass - register: etcd_vault_creds_cat - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Set facts for read Vault Creds - set_fact: - etcd_vault_creds: "{{ etcd_vault_creds_cat.stdout|from_json }}" - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Log into Vault and obtain an token - uri: - url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ etcd_vault_creds.username }}" - headers: - Accept: application/json - Content-Type: application/json - method: POST - body_format: json - body: - password: "{{ etcd_vault_creds.password }}" - register: etcd_vault_login_result - delegate_to: "{{ groups['vault'][0] }}" - -- name: gen_certs_vault | Set fact for vault_client_token - set_fact: - vault_client_token: "{{ etcd_vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" - run_once: true - -- name: gen_certs_vault | Set fact for Vault API token - set_fact: - etcd_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ vault_client_token }}" - run_once: true - when: vault_client_token != "" - # Issue master certs to Etcd 
nodes - include: ../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_alt_names: "{{ groups.etcd + ['localhost'] }}" issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}" issue_cert_file_group: "{{ etcd_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ etcd_vault_headers }}" issue_cert_hosts: "{{ groups.etcd }}" issue_cert_ip_sans: >- [ @@ -74,11 +37,11 @@ # Issue node certs to everyone else - include: ../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_alt_names: "{{ etcd_node_cert_hosts }}" issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}" issue_cert_file_group: "{{ etcd_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ etcd_vault_headers }}" issue_cert_hosts: "{{ etcd_node_cert_hosts }}" issue_cert_ip_sans: >- [ diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index 31abdbf5b..82535fd20 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -1,56 +1,23 @@ --- - include: sync_kube_master_certs.yml when: inventory_hostname in groups['kube-master'] - tags: k8s-secrets - include: sync_kube_node_certs.yml when: inventory_hostname in groups['k8s-cluster'] - tags: k8s-secrets -- name: gen_certs_vault | Read in the local credentials - command: cat /etc/vault/roles/kube/userpass - register: kube_vault_creds_cat - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Set facts for read Vault Creds - set_fact: - kube_vault_creds: "{{ kube_vault_creds_cat.stdout|from_json }}" - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Log into Vault and obtain an token - uri: - url: "{{ hostvars[groups['vault'][0]]['vault_leader_url'] 
}}/v1/auth/userpass/login/{{ kube_vault_creds.username }}" - headers: - Accept: application/json - Content-Type: application/json - method: POST - body_format: json - body: - password: "{{ kube_vault_creds.password }}" - register: kube_vault_login_result - delegate_to: "{{ groups['k8s-cluster'][0] }}" - -- name: gen_certs_vault | Set fact for Vault API token - set_fact: - kube_vault_headers: - Accept: application/json - Content-Type: application/json - X-Vault-Token: "{{ kube_vault_login_result.get('json',{}).get('auth', {}).get('client_token') }}" - run_once: true - -# Issue certs to kube-master nodes +# Issue admin certs to kube-master hosts - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_copy_ca: "{{ item == kube_master_certs_needed|first }}" + issue_cert_common_name: "admin:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" + issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['kube-master'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-master issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" issue_cert_mount_path: "{{ kube_vault_mount_path }}" - with_items: "{{ kube_master_certs_needed|d([]) }}" + with_items: "{{ kube_admin_certs_needed|d([]) }}" when: inventory_hostname in groups['kube-master'] - name: gen_certs_vault | Set fact about certificate alt names @@ -69,12 +36,13 @@ when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined run_once: true +# Issue master components certs to kube-master hosts - include: ../../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "kubernetes" issue_cert_alt_names: "{{ kube_cert_alt_names }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" 
issue_cert_hosts: "{{ groups['kube-master'] }}" issue_cert_ip_sans: >- [ @@ -87,7 +55,7 @@ "127.0.0.1","::1","{{ kube_apiserver_ip }}" ] issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-master issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_master_components_certs_needed|d([]) }}" @@ -97,27 +65,28 @@ # Issue node certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['k8s-cluster'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-node issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_node_certs_needed|d([]) }}" when: inventory_hostname in groups['k8s-cluster'] +# Issue proxy certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: + issue_cert_common_name: "system:kube-proxy:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube - issue_cert_headers: "{{ kube_vault_headers }}" issue_cert_hosts: "{{ groups['k8s-cluster'] }}" issue_cert_path: "{{ item }}" - issue_cert_role: kube + issue_cert_role: kube-proxy issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}" issue_cert_mount_path: "{{ kube_vault_mount_path }}" with_items: "{{ kube_proxy_certs_needed|d([]) }}" diff --git a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml index 
6fa861a36..277038612 100644 --- a/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml +++ b/roles/kubernetes/secrets/tasks/sync_kube_master_certs.yml @@ -2,7 +2,7 @@ - name: sync_kube_master_certs | Create list of needed kube admin certs set_fact: - kube_master_cert_list: "{{ kube_master_cert_list|d([]) + ['admin-' + item + '.pem'] }}" + kube_admin_cert_list: "{{ kube_admin_cert_list|d([]) + ['admin-' + item + '.pem'] }}" with_items: "{{ groups['kube-master'] }}" - include: ../../../vault/tasks/shared/sync_file.yml @@ -13,11 +13,11 @@ sync_file_hosts: "{{ groups['kube-master'] }}" sync_file_is_cert: true sync_file_owner: kube - with_items: "{{ kube_master_cert_list|d([]) }}" + with_items: "{{ kube_admin_cert_list|d([]) }}" - name: sync_kube_master_certs | Set facts for kube admin sync_file results set_fact: - kube_master_certs_needed: "{{ kube_master_certs_needed|default([]) + [item.path] }}" + kube_admin_certs_needed: "{{ kube_admin_certs_needed|default([]) + [item.path] }}" with_items: "{{ sync_file_results|d([]) }}" when: item.no_srcs|bool diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 5405e2577..c86f322fc 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -135,3 +135,10 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes }}" ## List of key=value pairs that describe feature gates for ## the k8s cluster. kube_feature_gates: [] + +# Vault data dirs. 
+vault_base_dir: /etc/vault +vault_cert_dir: "{{ vault_base_dir }}/ssl" +vault_config_dir: "{{ vault_base_dir }}/config" +vault_roles_dir: "{{ vault_base_dir }}/roles" +vault_secrets_dir: "{{ vault_base_dir }}/secrets" diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index eb2ffd122..d4e2ed66c 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -1,4 +1,6 @@ --- +vault_bootstrap: false +vault_deployment_type: docker vault_adduser_vars: comment: "Hashicorp Vault User" @@ -6,41 +8,18 @@ vault_adduser_vars: name: vault shell: /sbin/nologin system: yes + +# This variables redefined in kubespray-defaults for using shared tasks +# in etcd and kubernetes/secrets roles vault_base_dir: /etc/vault -# https://releases.hashicorp.com/vault/0.6.4/vault_0.6.4_SHA256SUMS -vault_version: 0.6.4 -vault_binary_checksum: 04d87dd553aed59f3fe316222217a8d8777f40115a115dac4d88fac1611c51a6 -vault_bootstrap: false -vault_ca_options: - common_name: vault - format: pem - ttl: 87600h vault_cert_dir: "{{ vault_base_dir }}/ssl" -vault_client_headers: - Accept: "application/json" - Content-Type: "application/json" -vault_config: - backend: - etcd: - address: "{{ vault_etcd_url }}" - ha_enabled: "true" - redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}" - tls_ca_file: "{{ vault_etcd_cert_dir }}/ca.pem" - cluster_name: "kubernetes-vault" - default_lease_ttl: "{{ vault_default_lease_ttl }}" - listener: - tcp: - address: "0.0.0.0:{{ vault_port }}" - tls_cert_file: "{{ vault_cert_dir }}/api.pem" - tls_key_file: "{{ vault_cert_dir }}/api-key.pem" - max_lease_ttl: "{{ vault_max_lease_ttl }}" vault_config_dir: "{{ vault_base_dir }}/config" -vault_container_name: kube-hashicorp-vault -# This variable is meant to match the GID of vault inside Hashicorp's official Vault Container -vault_default_lease_ttl: 720h -vault_default_role_permissions: - allow_any_name: true -vault_deployment_type: docker +vault_roles_dir: "{{ 
vault_base_dir }}/roles" +vault_secrets_dir: "{{ vault_base_dir }}/secrets" +vault_log_dir: "/var/log/vault" + +vault_version: 0.8.1 +vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188 vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_amd64.zip" vault_download_vars: container: "{{ vault_deployment_type != 'host' }}" @@ -55,17 +34,19 @@ vault_download_vars: unarchive: true url: "{{ vault_download_url }}" version: "{{ vault_version }}" -vault_etcd_url: "https://{{ hostvars[groups.etcd[0]]['ip']|d(hostvars[groups.etcd[0]]['ansible_default_ipv4']['address']) }}:2379" + +vault_container_name: kube-hashicorp-vault +vault_temp_container_name: vault-temp vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" -vault_log_dir: "/var/log/vault" -vault_max_lease_ttl: 87600h -vault_needs_gen: false + +vault_address: 0.0.0.0 vault_port: 8200 -vault_roles_dir: "{{ vault_base_dir }}/roles" -vault_secret_shares: 1 -vault_secret_threshold: 1 -vault_secrets_dir: "{{ vault_base_dir }}/secrets" +vault_etcd_url: "https://{{ hostvars[groups.etcd[0]]['ip']|d(hostvars[groups.etcd[0]]['ansible_default_ipv4']['address']) }}:2379" + +vault_default_lease_ttl: 720h +vault_max_lease_ttl: 87600h + vault_temp_config: backend: file: @@ -73,29 +54,109 @@ vault_temp_config: default_lease_ttl: "{{ vault_default_lease_ttl }}" listener: tcp: - address: "0.0.0.0:{{ vault_port }}" + address: "{{ vault_address }}:{{ vault_port }}" tls_disable: "true" max_lease_ttl: "{{ vault_max_lease_ttl }}" -vault_temp_container_name: vault-temp -# etcd pki mount options + +vault_config: + backend: + etcd: + address: "{{ vault_etcd_url }}" + ha_enabled: "true" + redirect_addr: "https://{{ ansible_default_ipv4.address }}:{{ vault_port }}" + tls_ca_file: "{{ vault_etcd_cert_dir }}/ca.pem" + cluster_name: "kubernetes-vault" + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" 
+ listener: + tcp: + address: "{{ vault_address }}:{{ vault_port }}" + tls_cert_file: "{{ vault_cert_dir }}/api.pem" + tls_key_file: "{{ vault_cert_dir }}/api-key.pem" + +vault_secret_shares: 1 +vault_secret_threshold: 1 + +vault_ca_options: + vault: + common_name: vault + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + etcd: + common_name: etcd + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + kube: + common_name: kube + format: pem + ttl: "{{ vault_max_lease_ttl }}" + exclude_cn_from_sans: true + +vault_client_headers: + Accept: "application/json" + Content-Type: "application/json" + vault_etcd_cert_dir: /etc/ssl/etcd/ssl -vault_etcd_mount_path: etcd -vault_etcd_default_lease_ttl: 720h -vault_etcd_max_lease_ttl: 87600h -vault_etcd_role: - name: etcd - group: etcd - policy_rules: default - role_options: default - mount_path: "{{ vault_etcd_mount_path }}" -# kubernetes pki mount options -vault_kube_cert_dir: "{{ kube_cert_dir }}" -vault_kube_mount_path: kube -vault_kube_default_lease_ttl: 720h -vault_kube_max_lease_ttl: 87600h -vault_kube_role: - name: kube - group: k8s-cluster - policy_rules: default - role_options: default - mount_path: "{{ vault_kube_mount_path }}" +vault_kube_cert_dir: /etc/kubernetes/ssl + +vault_pki_mounts: + vault: + name: vault + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Vault Root CA" + cert_dir: "{{ vault_cert_dir }}" + roles: + - name: vault + group: vault + password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'vault') | to_uuid }}" + policy_rules: default + role_options: default + etcd: + name: etcd + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Etcd Root CA" + cert_dir: "{{ vault_etcd_cert_dir }}" + roles: + - name: etcd + group: etcd + password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'etcd') | to_uuid }}" + 
policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "kube:etcd" + kube: + name: kube + default_lease_ttl: "{{ vault_default_lease_ttl }}" + max_lease_ttl: "{{ vault_max_lease_ttl }}" + description: "Kubernetes Root CA" + cert_dir: "{{ vault_kube_cert_dir }}" + roles: + - name: kube-master + group: kube-master + password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'kube-master') | to_uuid }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:masters" + - name: kube-node + group: k8s-cluster + password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'kube-node') | to_uuid }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:nodes" + - name: kube-proxy + group: k8s-cluster + password: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S' + cluster_name + 'kube-proxy') | to_uuid }}" + policy_rules: default + role_options: + allow_any_name: true + enforce_hostnames: false + organization: "system:node-proxier" diff --git a/roles/vault/tasks/bootstrap/create_etcd_role.yml b/roles/vault/tasks/bootstrap/create_etcd_role.yml deleted file mode 100644 index 74cd5fc2f..000000000 --- a/roles/vault/tasks/bootstrap/create_etcd_role.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- include: ../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - delegate_to: "{{ groups.vault|first }}" - run_once: true - -- include: ../shared/create_role.yml - vars: - create_role_name: "{{ vault_etcd_role.name }}" - create_role_group: "{{ vault_etcd_role.group }}" - create_role_policy_rules: "{{ vault_etcd_role.policy_rules }}" - create_role_options: "{{ vault_etcd_role.role_options }}" - create_role_mount_path: "{{ vault_etcd_role.mount_path }}" - when: 
inventory_hostname in groups.etcd diff --git a/roles/vault/tasks/bootstrap/create_mounts.yml b/roles/vault/tasks/bootstrap/create_mounts.yml new file mode 100644 index 000000000..0010c35c5 --- /dev/null +++ b/roles/vault/tasks/bootstrap/create_mounts.yml @@ -0,0 +1,12 @@ +--- +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ item.name }}" + create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" + create_mount_description: "{{ item.description }}" + create_mount_cert_dir: "{{ item.cert_dir }}" + create_mount_config_ca_needed: "{{ item.config_ca }}" + with_items: + - "{{ vault_pki_mounts.vault|combine({'config_ca': not vault_ca_cert_needed}) }}" + - "{{ vault_pki_mounts.etcd|combine({'config_ca': not vault_etcd_ca_cert_needed}) }}" diff --git a/roles/vault/tasks/bootstrap/create_roles.yml b/roles/vault/tasks/bootstrap/create_roles.yml new file mode 100644 index 000000000..11411d236 --- /dev/null +++ b/roles/vault/tasks/bootstrap/create_roles.yml @@ -0,0 +1,10 @@ +--- +- include: ../shared/create_role.yml + vars: + create_role_name: "{{ item.name }}" + create_role_group: "{{ item.group }}" + create_role_policy_rules: "{{ item.policy_rules }}" + create_role_password: "{{ item.password }}" + create_role_options: "{{ item.role_options }}" + create_role_mount_path: "{{ mount.name }}" + with_items: "{{ mount.roles }}" diff --git a/roles/vault/tasks/bootstrap/gen_vault_certs.yml b/roles/vault/tasks/bootstrap/gen_vault_certs.yml index 651c2ac49..ce4538571 100644 --- a/roles/vault/tasks/bootstrap/gen_vault_certs.yml +++ b/roles/vault/tasks/bootstrap/gen_vault_certs.yml @@ -1,29 +1,21 @@ --- - -- name: boostrap/gen_vault_certs | Add the vault role - uri: - url: "{{ vault_leader_url }}/v1/{{ vault_ca_options.common_name }}/roles/vault" - headers: "{{ vault_headers }}" - method: POST - body_format: json - body: "{{ vault_default_role_permissions }}" - status_code: 204 - when: 
inventory_hostname == groups.vault|first and vault_api_cert_needed - - include: ../shared/issue_cert.yml vars: + issue_cert_common_name: "{{ vault_pki_mounts.vault.roles[0].name }}" issue_cert_alt_names: "{{ groups.vault + ['localhost'] }}" issue_cert_hosts: "{{ groups.vault }}" issue_cert_ip_sans: >- [ {%- for host in groups.vault -%} "{{ hostvars[host]['ansible_default_ipv4']['address'] }}", + {%- if hostvars[host]['ip'] is defined -%} + "{{ hostvars[host]['ip'] }}", + {%- endif -%} {%- endfor -%} "127.0.0.1","::1" ] - issue_cert_mount_path: "{{ vault_ca_options.common_name }}" + issue_cert_mount_path: "{{ vault_pki_mounts.vault.name }}" issue_cert_path: "{{ vault_cert_dir }}/api.pem" - issue_cert_headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" - issue_cert_role: vault + issue_cert_role: "{{ vault_pki_mounts.vault.roles[0].name }}" issue_cert_url: "{{ vault_leader_url }}" when: vault_api_cert_needed diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index 768d9e03b..88e5c2050 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -1,5 +1,4 @@ --- - - include: ../shared/check_vault.yml when: inventory_hostname in groups.vault @@ -9,72 +8,57 @@ - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d() -## Sync Certs - - include: sync_vault_certs.yml when: inventory_hostname in groups.vault - include: sync_etcd_certs.yml when: inventory_hostname in groups.etcd -## Generate Certs - -# Start a temporary instance of Vault - include: start_vault_temp.yml - when: >- - inventory_hostname == groups.vault|first and - not vault_cluster_is_initialized + when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized -# Set vault_leader_url for all nodes based on above -- name: vault | bootstrap +- name: vault | Set fact about vault leader url set_fact: vault_leader_url: "{{ 
hostvars[groups.vault|first]['vault_leader_url'] }}" when: not vault_cluster_is_initialized -# Ensure vault PKI mounts exists -- include: ../shared/create_mount.yml - vars: - create_mount_path: "{{ vault_ca_options.common_name }}" - create_mount_default_lease_ttl: "{{ vault_default_lease_ttl }}" - create_mount_max_lease_ttl: "{{ vault_max_lease_ttl }}" - create_mount_description: "Vault Root CA" - create_mount_cert_dir: "{{ vault_cert_dir }}" - create_mount_config_ca_needed: "{{ not vault_ca_cert_needed }}" +- include: create_mounts.yml when: inventory_hostname == groups.vault|first -# Generate root CA certs for Vault if none exist +- include: ../shared/auth_backend.yml + vars: + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass + when: inventory_hostname == groups.vault|first + +- include: create_roles.yml + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + loop_control: + loop_var: mount + - include: ../shared/gen_ca.yml vars: - gen_ca_cert_dir: "{{ vault_cert_dir }}" - gen_ca_mount_path: "{{ vault_ca_options.common_name }}" + gen_ca_cert_dir: "{{ vault_pki_mounts.vault.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.vault.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.vault }}" when: >- - inventory_hostname in groups.vault and - not vault_cluster_is_initialized and - vault_ca_cert_needed + inventory_hostname in groups.vault + and not vault_cluster_is_initialized + and vault_ca_cert_needed + +- include: ../shared/gen_ca.yml + vars: + gen_ca_cert_dir: "{{ vault_pki_mounts.etcd.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.etcd.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.etcd }}" + when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed -# Generate Vault API certs - 
include: gen_vault_certs.yml when: inventory_hostname in groups.vault and vault_api_cert_needed -# Ensure etcd PKI mounts exists -- include: ../shared/create_mount.yml - vars: - create_mount_path: "{{ vault_etcd_mount_path }}" - create_mount_default_lease_ttl: "{{ vault_etcd_default_lease_ttl }}" - create_mount_max_lease_ttl: "{{ vault_etcd_max_lease_ttl }}" - create_mount_description: "Etcd Root CA" - create_mount_cert_dir: "{{ vault_etcd_cert_dir }}" - create_mount_config_ca_needed: "{{ not vault_etcd_ca_cert_needed }}" - when: inventory_hostname == groups.vault|first - -# Generate root CA certs for etcd if none exist -- include: ../shared/gen_ca.yml - vars: - gen_ca_cert_dir: "{{ vault_etcd_cert_dir }}" - gen_ca_mount_path: "{{ vault_etcd_mount_path }}" - when: inventory_hostname in groups.etcd and vault_etcd_ca_cert_needed - -- include: create_etcd_role.yml - -# Update all host's CA bundle, etcd CA will be added in etcd role - include: ca_trust.yml diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml new file mode 100644 index 000000000..b1be8c9fe --- /dev/null +++ b/roles/vault/tasks/cluster/create_mounts.yml @@ -0,0 +1,13 @@ +--- +- include: ../shared/create_mount.yml + vars: + create_mount_path: "{{ item.name }}" + create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}" + create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" + create_mount_description: "{{ item.description }}" + create_mount_cert_dir: "{{ item.cert_dir }}" + create_mount_config_ca_needed: "{{ item.name != vault_pki_mounts.kube.name }}" + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + - "{{ vault_pki_mounts.kube }}" diff --git a/roles/vault/tasks/cluster/create_roles.yml b/roles/vault/tasks/cluster/create_roles.yml index 54aae815d..9314bfa84 100644 --- a/roles/vault/tasks/cluster/create_roles.yml +++ b/roles/vault/tasks/cluster/create_roles.yml @@ -1,18 +1,10 @@ --- -- include: 
../shared/auth_backend.yml - vars: - auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates - auth_backend_path: userpass - auth_backend_type: userpass - when: inventory_hostname == groups.vault|first - - include: ../shared/create_role.yml vars: create_role_name: "{{ item.name }}" create_role_group: "{{ item.group }}" + create_role_password: "{{ item.password }}" create_role_policy_rules: "{{ item.policy_rules }}" create_role_options: "{{ item.role_options }}" - create_role_mount_path: "{{ item.mount_path }}" - with_items: - - "{{ vault_etcd_role }}" - - "{{ vault_kube_role }}" + create_role_mount_path: "{{ vault_pki_mounts.kube.name }}" + with_items: "{{ vault_pki_mounts.kube.roles }}" diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index 7a5acba0a..bca6da5be 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -5,8 +5,6 @@ - include: ../shared/check_etcd.yml when: inventory_hostname in groups.vault -## Vault Cluster Setup - - include: configure.yml when: inventory_hostname in groups.vault @@ -25,42 +23,22 @@ - include: ../shared/find_leader.yml when: inventory_hostname in groups.vault -- include: ../shared/create_mount.yml - vars: - create_mount_path: "{{ vault_ca_options.common_name }}" - create_mount_default_lease_ttl: "{{ vault_default_lease_ttl }}" - create_mount_max_lease_ttl: "{{ vault_max_lease_ttl }}" - create_mount_description: "Vault Root CA" - create_mount_cert_dir: "{{ vault_cert_dir }}" - create_mount_config_ca_needed: true - when: inventory_hostname == groups.vault|first - -- include: ../shared/create_mount.yml - vars: - create_mount_path: "{{ vault_etcd_mount_path }}" - create_mount_default_lease_ttl: "{{ vault_etcd_default_lease_ttl }}" - create_mount_max_lease_ttl: "{{ vault_etcd_max_lease_ttl }}" - create_mount_description: "Etcd Root CA" - create_mount_cert_dir: "{{ vault_etcd_cert_dir }}" - 
create_mount_config_ca_needed: true - when: inventory_hostname == groups.vault|first - -- include: ../shared/create_mount.yml - vars: - create_mount_path: "{{ vault_kube_mount_path }}" - create_mount_default_lease_ttl: "{{ vault_kube_default_lease_ttl }}" - create_mount_max_lease_ttl: "{{ vault_kube_max_lease_ttl }}" - create_mount_description: "Kubernetes Root CA" - create_mount_cert_dir: "{{ vault_kube_cert_dir }}" - create_mount_config_ca_needed: false +- include: create_mounts.yml when: inventory_hostname == groups.vault|first - include: ../shared/gen_ca.yml vars: - gen_ca_cert_dir: "{{ vault_kube_cert_dir }}" - gen_ca_mount_path: "{{ vault_kube_mount_path }}" + gen_ca_cert_dir: "{{ vault_pki_mounts.kube.cert_dir }}" + gen_ca_mount_path: "{{ vault_pki_mounts.kube.name }}" + gen_ca_vault_headers: "{{ vault_headers }}" + gen_ca_vault_options: "{{ vault_ca_options.kube }}" when: inventory_hostname in groups.vault -## Vault Policies, Roles, and Auth Backends +- include: ../shared/auth_backend.yml + vars: + auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates + auth_backend_path: userpass + auth_backend_type: userpass + when: inventory_hostname == groups.vault|first - include: create_roles.yml diff --git a/roles/vault/tasks/shared/create_role.yml b/roles/vault/tasks/shared/create_role.yml index fae45207a..dc9b5e1c6 100644 --- a/roles/vault/tasks/shared/create_role.yml +++ b/roles/vault/tasks/shared/create_role.yml @@ -1,5 +1,4 @@ --- - # The JSON inside JSON here is intentional (Vault API wants it) - name: create_role | Create a policy for the new role allowing issuing uri: @@ -22,7 +21,7 @@ status_code: 204 when: inventory_hostname == groups[create_role_group]|first -- name: create_role | Create the new role in the {{ create_role_mount_path }} pki mount +- name: create_role | Create {{ create_role_name }} role in the {{ create_role_mount_path }} pki mount uri: url: "{{ 
hostvars[groups.vault|first]['vault_leader_url'] }}/v1/{{ create_role_mount_path }}/roles/{{ create_role_name }}" headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}" @@ -42,7 +41,7 @@ - include: gen_userpass.yml vars: gen_userpass_group: "{{ create_role_group }}" - gen_userpass_password: "{{ create_role_password|d(''|to_uuid) }}" + gen_userpass_password: "{{ create_role_password }}" gen_userpass_policies: "{{ create_role_name }}" gen_userpass_role: "{{ create_role_name }}" gen_userpass_username: "{{ create_role_name }}" diff --git a/roles/vault/tasks/shared/gen_ca.yml b/roles/vault/tasks/shared/gen_ca.yml index b80ebeb6b..291f42734 100644 --- a/roles/vault/tasks/shared/gen_ca.yml +++ b/roles/vault/tasks/shared/gen_ca.yml @@ -8,10 +8,10 @@ - name: "bootstrap/gen_ca | Generate {{ gen_ca_mount_path }} root CA" uri: url: "{{ vault_leader_url }}/v1/{{ gen_ca_mount_path }}/root/generate/exported" - headers: "{{ vault_headers }}" + headers: "{{ gen_ca_vault_headers }}" method: POST body_format: json - body: "{{ vault_ca_options }}" + body: "{{ gen_ca_vault_options }}" register: vault_ca_gen delegate_to: "{{ groups.vault|first }}" run_once: true diff --git a/roles/vault/tasks/shared/gen_userpass.yml b/roles/vault/tasks/shared/gen_userpass.yml index 4ef301171..2bc0c98f1 100644 --- a/roles/vault/tasks/shared/gen_userpass.yml +++ b/roles/vault/tasks/shared/gen_userpass.yml @@ -1,5 +1,4 @@ --- - - name: shared/gen_userpass | Create the Username/Password combo for the role uri: url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/users/{{ gen_userpass_username }}" diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index fa09bfd2b..0b14d77c5 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -11,7 +11,6 @@ # issue_cert_file_mode: Mode of the placed cert file # issue_cert_file_owner: Owner of the placed cert file and directory # issue_cert_format: 
Format for returned data. Can be pem, der, or pem_bundle -# issue_cert_headers: Headers passed into the issue request # issue_cert_hosts: List of hosts to distribute the cert to # issue_cert_ip_sans: Requested IP Subject Alternative Names, in a list # issue_cert_mount_path: Mount point in Vault to make the request to @@ -27,7 +26,47 @@ mode: "{{ issue_cert_dir_mode | d('0755') }}" owner: "{{ issue_cert_file_owner | d('root') }}" -- name: "issue_cert | Generate the cert for {{ issue_cert_role }}" +- name: "issue_cert | Read in the local credentials" + command: cat {{ vault_roles_dir }}/{{ issue_cert_role }}/userpass + register: vault_creds_cat + delegate_to: "{{ issue_cert_hosts|first }}" + run_once: true + +- name: gen_certs_vault | Set facts for read Vault Creds + set_fact: + user_vault_creds: "{{ vault_creds_cat.stdout|from_json }}" + delegate_to: "{{ issue_cert_hosts|first }}" + run_once: true + +- name: gen_certs_vault | Log into Vault and obtain an token + uri: + url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}/v1/auth/userpass/login/{{ user_vault_creds.username }}" + headers: + Accept: application/json + Content-Type: application/json + method: POST + body_format: json + body: + password: "{{ user_vault_creds.password }}" + register: vault_login_result + delegate_to: "{{ issue_cert_hosts|first }}" + run_once: true + +- name: gen_certs_vault | Set fact for vault_client_token + set_fact: + vault_client_token: "{{ vault_login_result.get('json', {}).get('auth', {}).get('client_token') }}" + run_once: true + +- name: gen_certs_vault | Set fact for Vault API token + set_fact: + issue_cert_headers: + Accept: application/json + Content-Type: application/json + X-Vault-Token: "{{ vault_client_token }}" + run_once: true + when: vault_client_token != "" + +- name: "issue_cert | Generate {{ issue_cert_path }} for {{ issue_cert_role }} role" uri: url: "{{ issue_cert_url }}/v1/{{ issue_cert_mount_path|d('pki') }}/issue/{{ issue_cert_role }}" headers: "{{ 
issue_cert_headers }}" @@ -70,7 +109,7 @@ - name: issue_cert | Copy certificate serial to all hosts copy: content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['serial_number'] }}" - dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial }}" + dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_file_mode | d('0640') }}" owner: "{{ issue_cert_file_owner | d('root') }}" From 3acb86805b590dc1553bfe4591d39538d21a7619 Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 00:16:44 +0300 Subject: [PATCH 32/64] Rename vault_address to vault_bind_address --- roles/vault/defaults/main.yml | 6 +++--- roles/vault/tasks/shared/issue_cert.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index d4e2ed66c..2320ae862 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -40,7 +40,7 @@ vault_temp_container_name: vault-temp vault_image_repo: "vault" vault_image_tag: "{{ vault_version }}" -vault_address: 0.0.0.0 +vault_bind_address: 0.0.0.0 vault_port: 8200 vault_etcd_url: "https://{{ hostvars[groups.etcd[0]]['ip']|d(hostvars[groups.etcd[0]]['ansible_default_ipv4']['address']) }}:2379" @@ -54,7 +54,7 @@ vault_temp_config: default_lease_ttl: "{{ vault_default_lease_ttl }}" listener: tcp: - address: "{{ vault_address }}:{{ vault_port }}" + address: "{{ vault_bind_address }}:{{ vault_port }}" tls_disable: "true" max_lease_ttl: "{{ vault_max_lease_ttl }}" @@ -70,7 +70,7 @@ vault_config: max_lease_ttl: "{{ vault_max_lease_ttl }}" listener: tcp: - address: "{{ vault_address }}:{{ vault_port }}" + address: "{{ vault_bind_address }}:{{ vault_port }}" tls_cert_file: "{{ vault_cert_dir }}/api.pem" tls_key_file: "{{ vault_cert_dir }}/api-key.pem" diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 0b14d77c5..844183816 
100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -108,7 +108,7 @@ - name: issue_cert | Copy certificate serial to all hosts copy: - content: "{{ hostvars[issue_cert_hosts|first]['issue_cert_result']['json']['data']['serial_number'] }}" + wcontent: "{{ issue_cert_result['issue_cert_result']['json']['data']['serial_number'] }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_file_mode | d('0640') }}" From e1384f6618f5edd95d8665fbaaa17b33ac19e08e Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 00:21:34 +0300 Subject: [PATCH 33/64] Using issue cert result var instead hostvars --- roles/vault/tasks/shared/issue_cert.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 844183816..3f37c38cb 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -108,7 +108,7 @@ - name: issue_cert | Copy certificate serial to all hosts copy: - wcontent: "{{ issue_cert_result['issue_cert_result']['json']['data']['serial_number'] }}" + wcontent: "{{ issue_cert_result['json']['data']['serial_number'] }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_file_mode | d('0640') }}" From 06035c0f4e2753f127bc64b29d8c77d19ff67fb8 Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 00:37:27 +0300 Subject: [PATCH 34/64] Change vault CI CLOUD_MACHINE_TYPE to n1-standard-2 --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 465b7ac57..7080c7c67 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -367,6 +367,7 @@ before_script: .ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables # stage: deploy-gce-part1 AUTHORIZATION_MODES: "{ 'authorization_modes': [ 
'RBAC' ] }" + CLOUD_MACHINE_TYPE: "n1-standard-2" KUBE_NETWORK_PLUGIN: canal CERT_MGMT: vault CLOUD_IMAGE: ubuntu-1604-xenial From ad313c9d496cf8d8c10bb6223c304e932bbc9c92 Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 00:38:00 +0300 Subject: [PATCH 35/64] typo fix --- roles/vault/tasks/shared/issue_cert.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 3f37c38cb..51b969f4e 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -108,7 +108,7 @@ - name: issue_cert | Copy certificate serial to all hosts copy: - wcontent: "{{ issue_cert_result['json']['data']['serial_number'] }}" + content: "{{ issue_cert_result['json']['data']['serial_number'] }}" dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial" group: "{{ issue_cert_file_group | d('root' )}}" mode: "{{ issue_cert_file_mode | d('0640') }}" From b930b0ef5a160685275cc62ddf6979a32b97c051 Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 11:05:06 +0300 Subject: [PATCH 36/64] Place vault role credentials only to vault group hosts --- roles/vault/tasks/bootstrap/main.yml | 1 + roles/vault/tasks/cluster/main.yml | 1 + roles/vault/tasks/shared/create_role.yml | 8 ++++---- roles/vault/tasks/shared/gen_userpass.yml | 5 ++--- roles/vault/tasks/shared/issue_cert.yml | 6 +++--- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index 88e5c2050..e061028b7 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -38,6 +38,7 @@ - "{{ vault_pki_mounts.etcd }}" loop_control: loop_var: mount + when: inventory_hostname in groups.vault - include: ../shared/gen_ca.yml vars: diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index bca6da5be..9c7c83aaf 100644 --- 
a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -42,3 +42,4 @@ when: inventory_hostname == groups.vault|first - include: create_roles.yml + when: inventory_hostname in groups.vault diff --git a/roles/vault/tasks/shared/create_role.yml b/roles/vault/tasks/shared/create_role.yml index dc9b5e1c6..d76e73f13 100644 --- a/roles/vault/tasks/shared/create_role.yml +++ b/roles/vault/tasks/shared/create_role.yml @@ -19,7 +19,8 @@ {{ create_role_policy_rules | to_json + '\n' }} {%- endif -%} status_code: 204 - when: inventory_hostname == groups[create_role_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true - name: create_role | Create {{ create_role_name }} role in the {{ create_role_mount_path }} pki mount uri: @@ -34,15 +35,14 @@ {{ create_role_options }} {%- endif -%} status_code: 204 - when: inventory_hostname == groups[create_role_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true ## Userpass based auth method - include: gen_userpass.yml vars: - gen_userpass_group: "{{ create_role_group }}" gen_userpass_password: "{{ create_role_password }}" gen_userpass_policies: "{{ create_role_name }}" gen_userpass_role: "{{ create_role_name }}" gen_userpass_username: "{{ create_role_name }}" - when: inventory_hostname in groups[create_role_group] diff --git a/roles/vault/tasks/shared/gen_userpass.yml b/roles/vault/tasks/shared/gen_userpass.yml index 2bc0c98f1..5def39d0e 100644 --- a/roles/vault/tasks/shared/gen_userpass.yml +++ b/roles/vault/tasks/shared/gen_userpass.yml @@ -10,13 +10,13 @@ password: "{{ gen_userpass_password }}" policies: "{{ gen_userpass_role }}" status_code: 204 - when: inventory_hostname == groups[gen_userpass_group]|first + delegate_to: "{{ groups.vault|first }}" + run_once: true - name: shared/gen_userpass | Ensure destination directory exists file: path: "{{ vault_roles_dir }}/{{ gen_userpass_role }}" state: directory - when: inventory_hostname in groups[gen_userpass_group] - name: 
shared/gen_userpass | Copy credentials to all hosts in the group copy: @@ -26,4 +26,3 @@ 'password': gen_userpass_password} | to_nice_json(indent=4) }} dest: "{{ vault_roles_dir }}/{{ gen_userpass_role }}/userpass" - when: inventory_hostname in groups[gen_userpass_group] diff --git a/roles/vault/tasks/shared/issue_cert.yml b/roles/vault/tasks/shared/issue_cert.yml index 51b969f4e..24db59957 100644 --- a/roles/vault/tasks/shared/issue_cert.yml +++ b/roles/vault/tasks/shared/issue_cert.yml @@ -29,13 +29,13 @@ - name: "issue_cert | Read in the local credentials" command: cat {{ vault_roles_dir }}/{{ issue_cert_role }}/userpass register: vault_creds_cat - delegate_to: "{{ issue_cert_hosts|first }}" + delegate_to: "{{ groups.vault|first }}" run_once: true - name: gen_certs_vault | Set facts for read Vault Creds set_fact: user_vault_creds: "{{ vault_creds_cat.stdout|from_json }}" - delegate_to: "{{ issue_cert_hosts|first }}" + delegate_to: "{{ groups.vault|first }}" run_once: true - name: gen_certs_vault | Log into Vault and obtain an token @@ -49,7 +49,7 @@ body: password: "{{ user_vault_creds.password }}" register: vault_login_result - delegate_to: "{{ issue_cert_hosts|first }}" + delegate_to: "{{ groups.vault|first }}" run_once: true - name: gen_certs_vault | Set fact for vault_client_token From 82eedbd62208f5fadd38427119526a01c132b718 Mon Sep 17 00:00:00 2001 From: Yorgos Saslis Date: Tue, 5 Sep 2017 14:10:53 +0300 Subject: [PATCH 37/64] Update ansible inventory file when template changes (#1612) This trigger ensures the inventory file is kept up-to-date. Otherwise, if the file exists and you've made changes to your terraform-managed infra without having deleted the file, it would never get updated. For example, consider the case where you've destroyed and re-applied the terraform resources, none of the IPs would get updated, so ansible would be trying to connect to the old ones. 
--- contrib/terraform/aws/create-infrastructure.tf | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf index a58bca53c..f1f1e205b 100644 --- a/contrib/terraform/aws/create-infrastructure.tf +++ b/contrib/terraform/aws/create-infrastructure.tf @@ -162,7 +162,7 @@ resource "aws_instance" "k8s-worker" { */ data "template_file" "inventory" { template = "${file("${path.module}/templates/inventory.tpl")}" - + vars { public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}" connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" @@ -176,6 +176,7 @@ data "template_file" "inventory" { kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}" } + } resource "null_resource" "inventories" { @@ -183,4 +184,8 @@ resource "null_resource" "inventories" { command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts" } + triggers { + template = "${data.template_file.inventory.rendered}" + } + } From 957b7115feeff8a681c5a843d01302395cb3c144 Mon Sep 17 00:00:00 2001 From: mkrasilnikov Date: Tue, 5 Sep 2017 14:40:26 +0300 Subject: [PATCH 38/64] Remove node name from kube-proxy and admin certificates --- roles/kubernetes/secrets/tasks/gen_certs_vault.yml | 4 ++-- roles/vault/tasks/bootstrap/main.yml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index 82535fd20..8cad8cc66 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -8,7 +8,7 @@ # Issue admin certs to kube-master hosts - include: ../../../vault/tasks/shared/issue_cert.yml vars: 
- issue_cert_common_name: "admin:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" + issue_cert_common_name: "admin" issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube @@ -80,7 +80,7 @@ # Issue proxy certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_common_name: "system:kube-proxy:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" + issue_cert_common_name: "system:kube-proxy" issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube diff --git a/roles/vault/tasks/bootstrap/main.yml b/roles/vault/tasks/bootstrap/main.yml index e061028b7..b87954ca7 100644 --- a/roles/vault/tasks/bootstrap/main.yml +++ b/roles/vault/tasks/bootstrap/main.yml @@ -6,7 +6,7 @@ when: inventory_hostname in groups.vault - include: ../shared/find_leader.yml - when: inventory_hostname in groups.vault and vault_cluster_is_initialized|d() + when: inventory_hostname in groups.vault and vault_cluster_is_initialized - include: sync_vault_certs.yml when: inventory_hostname in groups.vault From 35d48cc88cbf9df0c715698a11599b03abe597fd Mon Sep 17 00:00:00 2001 From: Tennis Smith Date: Tue, 5 Sep 2017 10:41:47 -0500 Subject: [PATCH 39/64] Point apiserver address to 0.0.0.0 (#1617) * Point apiserver address to 0.0.0.0 Added loadbalancer api server address * Update documentation --- contrib/terraform/aws/README.md | 16 ++++++++++++---- contrib/terraform/aws/create-infrastructure.tf | 3 +-- contrib/terraform/aws/templates/inventory.tpl | 2 +- contrib/terraform/aws/terraform.tfvars | 13 +++++++------ contrib/terraform/aws/variables.tf | 4 ++-- 5 files changed, 23 insertions(+), 15 deletions(-) diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index 451fc58a7..188fea00e 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -25,16 
+25,24 @@ export AWS_DEFAULT_REGION="zzz" - Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars` - Update `contrib/terraform/aws/terraform.tfvars` with your data - - Allocate new AWS Elastic IPs: Depending on # of Availability Zones used (2 for each AZ) - - Create an AWS EC2 SSH Key - - +- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below) +- Create an AWS EC2 SSH Key - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials +Example: +```commandline +terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77' +``` + - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. +Example (this one assumes you are using CoreOS) +```commandline +ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache +``` + **Troubleshooting** ***Remaining AWS IAM Instance Profile***: diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf index f1f1e205b..04c5a8881 100644 --- a/contrib/terraform/aws/create-infrastructure.tf +++ b/contrib/terraform/aws/create-infrastructure.tf @@ -173,8 +173,7 @@ data "template_file" "inventory" { list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}" elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}" - kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}" - + loadbalancer_apiserver_address = "loadbalancer_apiserver.address=${var.loadbalancer_apiserver_address}" } } diff --git 
a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl index 8d5afd1cf..dd8126002 100644 --- a/contrib/terraform/aws/templates/inventory.tpl +++ b/contrib/terraform/aws/templates/inventory.tpl @@ -25,4 +25,4 @@ kube-master [k8s-cluster:vars] ${elb_api_fqdn} ${elb_api_port} -${kube_insecure_apiserver_address} +${loadbalancer_apiserver_address} diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars index a538d46f3..bc83a719d 100644 --- a/contrib/terraform/aws/terraform.tfvars +++ b/contrib/terraform/aws/terraform.tfvars @@ -5,11 +5,11 @@ aws_cluster_name = "devtest" aws_vpc_cidr_block = "10.250.192.0/18" aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"] aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"] -aws_avail_zones = ["eu-central-1a","eu-central-1b"] +aws_avail_zones = ["us-west-2a","us-west-2b"] #Bastion Host -aws_bastion_ami = "ami-5900cc36" -aws_bastion_size = "t2.small" +aws_bastion_ami = "ami-db56b9a3" +aws_bastion_size = "t2.medium" #Kubernetes Cluster @@ -23,9 +23,10 @@ aws_etcd_size = "t2.medium" aws_kube_worker_num = 4 aws_kube_worker_size = "t2.medium" -aws_cluster_ami = "ami-903df7ff" +aws_cluster_ami = "ami-db56b9a3" #Settings AWS ELB -aws_elb_api_port = 443 -k8s_secure_api_port = 443 +aws_elb_api_port = 6443 +k8s_secure_api_port = 6443 +kube_insecure_apiserver_address = "0.0.0.0" diff --git a/contrib/terraform/aws/variables.tf b/contrib/terraform/aws/variables.tf index c740e6472..c7c65c772 100644 --- a/contrib/terraform/aws/variables.tf +++ b/contrib/terraform/aws/variables.tf @@ -96,6 +96,6 @@ variable "k8s_secure_api_port" { description = "Secure Port of K8S API Server" } -variable "kube_insecure_apiserver_address" { - description= "Bind Address for insecure Port of K8s API Server" +variable "loadbalancer_apiserver_address" { + description= "Bind Address for ELB of K8s API Server" } From 4c88ac69f277b8ee45686b24f9651adcf965991a Mon Sep 17 
00:00:00 2001 From: Matthew Mosesohn Date: Wed, 6 Sep 2017 09:36:54 +0300 Subject: [PATCH 40/64] Use kubectl apply instead of create/replace (#1610) Disable checks for existing resources to speed up execution. --- library/kube.py | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/library/kube.py b/library/kube.py index 77f7e6e35..52f6a235d 100644 --- a/library/kube.py +++ b/library/kube.py @@ -139,7 +139,7 @@ class KubeManager(object): if check and self.exists(): return [] - cmd = ['create'] + cmd = ['apply'] if not self.filename: self.module.fail_json(msg='filename required to create') @@ -150,10 +150,7 @@ class KubeManager(object): def replace(self): - if not self.force and not self.exists(): - return [] - - cmd = ['replace'] + cmd = ['apply'] if self.force: cmd.append('--force') @@ -271,7 +268,7 @@ def main(): manager = KubeManager(module) state = module.params.get('state') if state == 'present': - result = manager.create() + result = manager.create(check=False) elif state == 'absent': result = manager.delete() @@ -283,11 +280,7 @@ def main(): result = manager.stop() elif state == 'latest': - if manager.exists(): - manager.force = True - result = manager.replace() - else: - result = manager.create(check=False) + result = manager.replace() else: module.fail_json(msg='Unrecognized state %s.' 
% state) From a341adb7f31cb2083b18dc43d03538a68b43fd65 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Wed, 6 Sep 2017 02:55:08 -0500 Subject: [PATCH 41/64] Updating CN for node certs generated by vault (#1622) This allows the node authorization plugin to function correctly --- roles/kubernetes/secrets/tasks/gen_certs_vault.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml index 8cad8cc66..4c5dc2eaa 100644 --- a/roles/kubernetes/secrets/tasks/gen_certs_vault.yml +++ b/roles/kubernetes/secrets/tasks/gen_certs_vault.yml @@ -65,7 +65,9 @@ # Issue node certs to k8s-cluster nodes - include: ../../../vault/tasks/shared/issue_cert.yml vars: - issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}" + # Need to strip out the 'node-' prefix from the cert name so it can be used + # with the node authorization plugin ( CN matches kubelet node name ) + issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}" issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}" issue_cert_file_group: "{{ kube_cert_group }}" issue_cert_file_owner: kube From 0453ed8235c59b6fe57ae56d820d4687d7a2edb3 Mon Sep 17 00:00:00 2001 From: Matthieu Date: Wed, 6 Sep 2017 10:32:32 +0200 Subject: [PATCH 42/64] Fix an error with Canal when RBAC are disabled (#1619) * Fix an error with Canal when RBAC are disabled * Update using same rbac strategy used elsewhere --- roles/kubernetes-apps/network_plugin/canal/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml index 24607249f..cbe4f0ac7 100644 --- a/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml +++ b/roles/kubernetes-apps/network_plugin/canal/tasks/main.yml @@ -8,4 +8,4 @@ filename: 
"{{kube_config_dir}}/{{item.item.file}}" state: "latest" with_items: "{{ canal_manifests.results }}" - when: inventory_hostname == groups['kube-master'][0] + when: inventory_hostname == groups['kube-master'][0] and not item|skipped From cbaa2b57739f483b58d40dea3618b1630c576466 Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Wed, 6 Sep 2017 06:23:16 -0500 Subject: [PATCH 43/64] Retry Remove all Docker containers in reset (#1623) Due to various occasional docker bugs, removing a container will sometimes fail. This can often be mitigated by trying again. --- roles/reset/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 26b6141dd..624e7135e 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -38,6 +38,10 @@ - name: reset | remove all containers shell: "{{ docker_bin_dir }}/docker ps -aq | xargs -r docker rm -fv" + register: remove_all_containers + retries: 4 + until: remove_all_containers.rc == 0 + delay: 5 tags: ['docker'] - name: reset | restart docker if needed From 9bce364b3c54fa75bd1d9b4a81f0e3c387d90565 Mon Sep 17 00:00:00 2001 From: Maxim Krasilnikov Date: Wed, 6 Sep 2017 15:10:18 +0300 Subject: [PATCH 44/64] Update auth enabled methods in group_vars example (#1625) --- inventory/group_vars/k8s-cluster.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index b70cd6766..fb926c729 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -55,8 +55,8 @@ kube_users: ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) #kube_oidc_auth: false -#kube_basic_auth: false -#kube_token_auth: false +#kube_basic_auth: true +#kube_token_auth: true ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ From 
e6ff8c92a0c4c5f07bb6b541aee6a2be605c0f99 Mon Sep 17 00:00:00 2001 From: Oliver Moser Date: Wed, 6 Sep 2017 14:10:52 +0200 Subject: [PATCH 45/64] Using 'hostnamectl' to set unconfigured hostname on CoreOS (#1600) --- roles/bootstrap-os/tasks/main.yml | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/roles/bootstrap-os/tasks/main.yml b/roles/bootstrap-os/tasks/main.yml index e7cb01b13..5e1cdbc03 100644 --- a/roles/bootstrap-os/tasks/main.yml +++ b/roles/bootstrap-os/tasks/main.yml @@ -21,9 +21,20 @@ - name: Gather nodes hostnames setup: gather_subset: '!all' - filter: ansible_hostname + filter: ansible_* -- name: Assign inventory name to unconfigured hostnames +- name: Assign inventory name to unconfigured hostnames (non-CoreOS) hostname: name: "{{inventory_hostname}}" - when: ansible_hostname == 'localhost' + when: ansible_os_family not in ['CoreOS', 'Container Linux by CoreOS'] + +- name: Assign inventory name to unconfigured hostnames (CoreOS only) + command: "hostnamectl set-hostname {{inventory_hostname}}" + register: hostname_changed + when: ansible_hostname == 'localhost' and ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] + +- name: Update hostname fact (CoreOS only) + setup: + gather_subset: '!all' + filter: ansible_hostname + when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] and hostname_changed.changed From c60d1040568d75747f09f1f454ac3db9de48da1a Mon Sep 17 00:00:00 2001 From: Sam Powers Date: Wed, 6 Sep 2017 05:11:13 -0700 Subject: [PATCH 46/64] Update checksums (etcd calico calico-cni weave) to fix uploads.yml (#1584) the uploads.yml playbook was broken with checksum mismatch errors in various kubespray commits, for example, 3bfad5ca735169c8f800864156a3c0d4d5a76df7 which updated the version from 3.0.6 to 3.0.17 without updating the corresponding checksums. 
--- roles/download/defaults/main.yml | 2 +- roles/uploads/defaults/main.yml | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 2c4000866..d5c5ef7e4 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -35,7 +35,7 @@ pod_infra_version: 3.0 etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd" # Checksums -etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485" +etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b" # Containers # Possible values: host, docker diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml index 587c0f043..b3df52d5e 100644 --- a/roles/uploads/defaults/main.yml +++ b/roles/uploads/defaults/main.yml @@ -14,10 +14,10 @@ calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/relea weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave" # Checksums -calico_cni_checksum: "9a6bd6da267c498a1833117777c069f44f720d23226d8459bada2a0b41cb8258" -calico_cni_ipam_checksum: "8d3574736df1ce10ea88fdec94d84dc58642081d3774d2d48249c6ee94ed316d" -weave_checksum: "ee22e690985115a5986352b2c75589674349c618a5c95893f87600a13e2d58e9" -etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485" +calico_cni_checksum: "c72abd0d7ee88376952e43999bcbfa7958171708108bd3f1087c599115350b46" +calico_cni_ipam_checksum: "280fdb1d80f11904adc11760a9a5f3ae29b2aaf911ff0163a8da25646e757413" +weave_checksum: "311f5fe25036c774c3ea9975e033f67e1f3c5afbe8b5693a1d36d51c94ac31c4" +etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b" downloads: - name: calico-cni-plugin From e26aec96b0b8da7a6a571352551ea0104897bace Mon Sep 17 00:00:00 2001 From: Chad Swenson Date: Wed, 6 Sep 2017 07:11:51 -0500 Subject: [PATCH 47/64] Consolidate kube-proxy module and 
sysctl loading (#1586) This sets br_netfilter and net.bridge.bridge-nf-call-iptables sysctl from a single play before kube-proxy is first ran instead of from the flannel and weave network_plugin roles after kube-proxy is started --- roles/kubernetes/node/tasks/main.yml | 31 +++++++++++++++++++++ roles/network_plugin/flannel/tasks/main.yml | 31 --------------------- roles/network_plugin/weave/tasks/main.yml | 12 -------- 3 files changed, 31 insertions(+), 43 deletions(-) diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index e0558f8cd..d166fe661 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -49,6 +49,37 @@ when: kube_apiserver_node_port_range is defined tags: kube-proxy +- name: Verify if br_netfilter module exists + shell: "modinfo br_netfilter" + register: modinfo_br_netfilter + failed_when: modinfo_br_netfilter.rc not in [0, 1] + changed_when: false + +- name: Enable br_netfilter module + modprobe: + name: br_netfilter + state: present + when: modinfo_br_netfilter.rc == 0 + +# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module +- name: Check if bridge-nf-call-iptables key exists + command: "sysctl net.bridge.bridge-nf-call-iptables" + failed_when: false + changed_when: false + register: sysctl_bridge_nf_call_iptables + +- name: Enable bridge-nf-call tables + sysctl: + name: "{{ item }}" + state: present + value: 1 + reload: yes + when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 + with_items: + - net.bridge.bridge-nf-call-iptables + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + - name: Write proxy manifest template: src: manifests/kube-proxy.manifest.j2 diff --git a/roles/network_plugin/flannel/tasks/main.yml b/roles/network_plugin/flannel/tasks/main.yml index 037684fa4..7da3bfaa4 100644 --- a/roles/network_plugin/flannel/tasks/main.yml +++ 
b/roles/network_plugin/flannel/tasks/main.yml @@ -1,37 +1,6 @@ --- - include: pre-upgrade.yml -- name: Flannel | Verify if br_netfilter module exists - shell: "modinfo br_netfilter" - register: modinfo_br_netfilter - failed_when: modinfo_br_netfilter.rc not in [0, 1] - changed_when: false - -- name: Flannel | Enable br_netfilter module - modprobe: - name: br_netfilter - state: present - when: modinfo_br_netfilter.rc == 0 - -# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module -- name: Flannel | Check if bridge-nf-call-iptables key exists - command: "sysctl net.bridge.bridge-nf-call-iptables" - failed_when: false - changed_when: false - register: sysctl_bridge_nf_call_iptables - -- name: Flannel | Enable bridge-nf-call tables - sysctl: - name: "{{ item }}" - state: present - value: 1 - reload: yes - when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0 - with_items: - - net.bridge.bridge-nf-call-iptables - - net.bridge.bridge-nf-call-arptables - - net.bridge.bridge-nf-call-ip6tables - - name: Flannel | Create cni-flannel-rbac manifest template: src: cni-flannel-rbac.yml.j2 diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index bd6691859..38895ab40 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -4,18 +4,6 @@ - include: seed.yml when: weave_mode_seed -- name: Weave | Verify if br_netfilter module exists - shell: "modinfo br_netfilter" - register: modinfo_br_netfilter - failed_when: modinfo_br_netfilter.rc not in [0, 1] - changed_when: false - -- name: Weave | Enable br_netfilter module - modprobe: - name: br_netfilter - state: present - when: modinfo_br_netfilter.rc == 0 - - name: Weave | Copy cni plugins from hyperkube command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. 
/cnibindir/" register: cni_task_result From 7117614ee5279e2be6f1ef8acc650fe82fe5b8c5 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Wed, 6 Sep 2017 20:20:25 +0300 Subject: [PATCH 48/64] Use a generated password for kube user (#1624) Removed unnecessary root user --- .gitignore | 1 + docs/getting-started.md | 26 +++++++++++++++++++-- inventory/group_vars/k8s-cluster.yml | 9 +------ roles/kubespray-defaults/defaults/main.yaml | 3 --- tests/testcases/010_check-apiserver.yml | 4 +++- 5 files changed, 29 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index 8d5d5088b..4df491aa1 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,7 @@ __pycache__/ .Python env/ build/ +credentials/ develop-eggs/ dist/ downloads/ diff --git a/docs/getting-started.md b/docs/getting-started.md index 25bcbfaad..5494e6f0c 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -57,7 +57,7 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \ See more details in the [ansible guide](ansible.md). Adding nodes --------------------------- +------------ You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters. @@ -66,4 +66,26 @@ You may want to add worker nodes to your existing cluster. This can be done by r ``` ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \ --private-key=~/.ssh/private_key -``` \ No newline at end of file +``` + +Connecting to Kubernetes +------------------------ +By default, Kubespray configures kube-master hosts with insecure access to +kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, +because kubectl will use http://localhost:8080 to connect. 
The kubeconfig files +generated will point to localhost (on kube-masters) and kube-node hosts will +connect either to a localhost nginx proxy or to a loadbalancer if configured. +More details on this process is in the [HA guide](ha.md). + +Kubespray permits connecting to the cluster remotely on any IP of any +kube-master host on port 6443 by default. However, this requires +authentication. One could generate a kubeconfig based on one installed +kube-master hosts (needs improvement) or connect with a username and password. +By default, two users are created: `kube` and `admin` with the same password. +The password can be viewed after deployment by looking at the file +`PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated +password. If you wish to set your own password, just precreate/modify this +file yourself. + +For more information on kubeconfig and accessing a Kubernetes cluster, refer to +the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). 
diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index fb926c729..81d7017cb 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -40,18 +40,11 @@ kube_log_level: 2 # Users to create for basic auth in Kubernetes API via HTTP # Optionally add groups for user -kube_api_pwd: "changeme" +kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15') }}" kube_users: kube: pass: "{{kube_api_pwd}}" role: admin - root: - pass: "{{kube_api_pwd}}" - role: admin - # groups: - # - system:masters - - ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) #kube_oidc_auth: false diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index c86f322fc..fac0b44d8 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -66,9 +66,6 @@ kube_users: kube: pass: "{{kube_api_pwd}}" role: admin - root: - pass: "{{kube_api_pwd}}" - role: admin # Choose network plugin (calico, weave or flannel) # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index 8ca19e196..a9123f976 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -2,10 +2,12 @@ - hosts: kube-master tasks: + - debug: + msg: "kube pass: {{ lookup('password', '../../credentials/kube_user length=15') }}" - name: Check the API servers are responding uri: url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1" user: kube - password: changeme + password: "{{ lookup('password', '../../credentials/kube_user length=15') }}" validate_certs: no status_code: 200 From a47aaae0788e07244878abd6ef4fb215fdab0a98 Mon Sep 17 00:00:00 2001 From: Tennis Smith Date: Thu, 7 Sep 2017 13:26:52 
-0700 Subject: [PATCH 49/64] Add bastion host definitions (#1621) * Add comment line and documentation for bastion host usage * Take out unneeded sudo parm * Remove blank lines * revert changes * take out disabling of strict host checking --- contrib/terraform/aws/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index 188fea00e..d0d63f7e3 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -36,6 +36,11 @@ terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_addres - Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory` +- Ansible will automatically generate an ssh config file for your bastion hosts. To make use of it, make sure you have a line in your `ansible.cfg` file that looks like the following: +```commandline +ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m +``` + - Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag. Example (this one assumes you are using CoreOS) From fb30f659516d55e2494561a2008d1d9e6a16b3ed Mon Sep 17 00:00:00 2001 From: Yorgos Saslis Date: Thu, 7 Sep 2017 23:29:27 +0300 Subject: [PATCH 50/64] Add option for fact cache expiry (#1602) * Add option for fact cache expiry By adding the `fact_caching_timeout` we avoid having really stale/invalid data ending up in there. Leaving commented out by default, for backwards compatibility, but nice to have there. 
* Enabled cache-expiry by default Set to 2 hours and modified comment to reflect change --- ansible.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ansible.cfg b/ansible.cfg index aecb198a0..6ec3c35bc 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -7,6 +7,9 @@ host_key_checking=False gathering = smart fact_caching = jsonfile fact_caching_connection = /tmp +# Once created, fact cache is never force updated. This is why the tiemeout exists. If you're still getting +# stale data in your runs (e.g. stale `etcd_access_addresses`), you might want to use `--flush-cache`. +fact_caching_timeout = 7200 # 2 hours stdout_callback = skippy library = ./library callback_whitelist = profile_tasks From e16b57aa055a576c6572cf27dcaf535a70692429 Mon Sep 17 00:00:00 2001 From: Maxim Krasilnikov Date: Thu, 7 Sep 2017 23:30:16 +0300 Subject: [PATCH 51/64] Store vault users passwords to credentials dir. Create vault and etcd roles after start vault cluster (#1632) --- roles/kubernetes/secrets/tasks/main.yml | 2 +- roles/vault/defaults/main.yml | 10 +++++----- roles/vault/tasks/cluster/create_mounts.yml | 2 +- roles/vault/tasks/cluster/create_roles.yml | 4 ++-- roles/vault/tasks/cluster/main.yml | 6 ++++++ 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/roles/kubernetes/secrets/tasks/main.yml b/roles/kubernetes/secrets/tasks/main.yml index 2a15591df..97987f706 100644 --- a/roles/kubernetes/secrets/tasks/main.yml +++ b/roles/kubernetes/secrets/tasks/main.yml @@ -31,7 +31,7 @@ src: known_users.csv.j2 dest: "{{ kube_users_dir }}/known_users.csv" backup: yes - when: inventory_hostname in "{{ groups['kube-master'] }}" and kube_basic_auth|default(true) + when: inventory_hostname in groups['kube-master'] and kube_basic_auth|default(true) notify: set secret_changed # diff --git a/roles/vault/defaults/main.yml b/roles/vault/defaults/main.yml index 2320ae862..8916d4b3a 100644 --- a/roles/vault/defaults/main.yml +++ b/roles/vault/defaults/main.yml @@ -111,7 +111,7 @@ 
vault_pki_mounts: roles: - name: vault group: vault - password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'vault') | to_uuid }}" + password: "{{ lookup('password', 'credentials/vault/vault length=15') }}" policy_rules: default role_options: default etcd: @@ -123,7 +123,7 @@ vault_pki_mounts: roles: - name: etcd group: etcd - password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'etcd') | to_uuid }}" + password: "{{ lookup('password', 'credentials/vault/etcd length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -138,7 +138,7 @@ vault_pki_mounts: roles: - name: kube-master group: kube-master - password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'kube-master') | to_uuid }}" + password: "{{ lookup('password', 'credentials/vault/kube-master length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -146,7 +146,7 @@ vault_pki_mounts: organization: "system:masters" - name: kube-node group: k8s-cluster - password: "{{ lookup('pipe','date +%Y%m%d%H%M%S' + cluster_name + 'kube-node') | to_uuid }}" + password: "{{ lookup('password', 'credentials/vault/kube-node length=15') }}" policy_rules: default role_options: allow_any_name: true @@ -154,7 +154,7 @@ vault_pki_mounts: organization: "system:nodes" - name: kube-proxy group: k8s-cluster - password: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S' + cluster_name + 'kube-proxy') | to_uuid }}" + password: "{{ lookup('password', 'credentials/vault/kube-proxy length=15') }}" policy_rules: default role_options: allow_any_name: true diff --git a/roles/vault/tasks/cluster/create_mounts.yml b/roles/vault/tasks/cluster/create_mounts.yml index b1be8c9fe..d64fa0bae 100644 --- a/roles/vault/tasks/cluster/create_mounts.yml +++ b/roles/vault/tasks/cluster/create_mounts.yml @@ -6,7 +6,7 @@ create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}" create_mount_description: "{{ item.description }}" create_mount_cert_dir: "{{ item.cert_dir }}" - create_mount_config_ca_needed: "{{ 
item.name != vault_pki_mounts.kube.name }}" + create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name with_items: - "{{ vault_pki_mounts.vault }}" - "{{ vault_pki_mounts.etcd }}" diff --git a/roles/vault/tasks/cluster/create_roles.yml b/roles/vault/tasks/cluster/create_roles.yml index 9314bfa84..468229fd4 100644 --- a/roles/vault/tasks/cluster/create_roles.yml +++ b/roles/vault/tasks/cluster/create_roles.yml @@ -6,5 +6,5 @@ create_role_password: "{{ item.password }}" create_role_policy_rules: "{{ item.policy_rules }}" create_role_options: "{{ item.role_options }}" - create_role_mount_path: "{{ vault_pki_mounts.kube.name }}" - with_items: "{{ vault_pki_mounts.kube.roles }}" + create_role_mount_path: "{{ mount.name }}" + with_items: "{{ mount.roles }}" diff --git a/roles/vault/tasks/cluster/main.yml b/roles/vault/tasks/cluster/main.yml index 9c7c83aaf..94af5e5dc 100644 --- a/roles/vault/tasks/cluster/main.yml +++ b/roles/vault/tasks/cluster/main.yml @@ -42,4 +42,10 @@ when: inventory_hostname == groups.vault|first - include: create_roles.yml + with_items: + - "{{ vault_pki_mounts.vault }}" + - "{{ vault_pki_mounts.etcd }}" + - "{{ vault_pki_mounts.kube }}" + loop_control: + loop_var: mount when: inventory_hostname in groups.vault From 6f1fd12265336818c68464e16cebbbfeb9850af1 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 8 Sep 2017 10:19:58 +0300 Subject: [PATCH 52/64] Revert "Add option for fact cache expiry" (#1636) * Revert "Add option for fact cache expiry (#1602)" This reverts commit fb30f659516d55e2494561a2008d1d9e6a16b3ed. --- ansible.cfg | 3 --- 1 file changed, 3 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index 6ec3c35bc..aecb198a0 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -7,9 +7,6 @@ host_key_checking=False gathering = smart fact_caching = jsonfile fact_caching_connection = /tmp -# Once created, fact cache is never force updated. This is why the tiemeout exists. If you're still getting -# stale data in your runs (e.g. 
stale `etcd_access_addresses`), you might want to use `--flush-cache`. -fact_caching_timeout = 7200 # 2 hours stdout_callback = skippy library = ./library callback_whitelist = profile_tasks From 079d317adead071ea759d30ca062d4c052579bb9 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 8 Sep 2017 15:00:57 +0300 Subject: [PATCH 53/64] Default is_atomic to false (#1637) --- roles/kubespray-defaults/defaults/main.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index fac0b44d8..84fc0ee74 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -4,6 +4,9 @@ bootstrap_os: none kube_api_anonymous_auth: false +# Default value, but will be set to true automatically if detected +is_atomic: false + ## Change this to use another Kubernetes version, e.g. a current beta release kube_version: v1.6.7 From f29a42721fe9f1423d41792136c3f0e210b01e8f Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Fri, 8 Sep 2017 15:47:13 +0300 Subject: [PATCH 54/64] Clean up debug in check apiserver test (#1638) * Clean up debug in check apiserver test * Change password generation for kube_user Special characters are not allowed in known_users.csv file --- inventory/group_vars/k8s-cluster.yml | 2 +- tests/testcases/010_check-apiserver.yml | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index 81d7017cb..de4642807 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -40,7 +40,7 @@ kube_log_level: 2 # Users to create for basic auth in Kubernetes API via HTTP # Optionally add groups for user -kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15') }}" +kube_api_pwd: "{{ lookup('password', 'credentials/kube_user length=15 chars=ascii_letters,digits') }}" kube_users: kube: pass: 
"{{kube_api_pwd}}" diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index a9123f976..82c8b23d4 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -2,8 +2,6 @@ - hosts: kube-master tasks: - - debug: - msg: "kube pass: {{ lookup('password', '../../credentials/kube_user length=15') }}" - name: Check the API servers are responding uri: url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1" From aaa0105f75287ec9fb2500901c09fd1cad71046d Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Thu, 7 Sep 2017 22:19:46 +0000 Subject: [PATCH 55/64] Flexing calicocni.hostname based on cloud provider --- roles/network_plugin/calico/templates/cni-calico.conf.j2 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/network_plugin/calico/templates/cni-calico.conf.j2 b/roles/network_plugin/calico/templates/cni-calico.conf.j2 index 2b8d5b17c..973c7de53 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conf.j2 +++ b/roles/network_plugin/calico/templates/cni-calico.conf.j2 @@ -1,6 +1,10 @@ { "name": "calico-k8s-network", +{% if cloud_provider is defined %} + "hostname": "{{ inventory_hostname }}", +{% else %} "hostname": "{{ ansible_hostname }}", +{% endif %} "type": "calico", "etcd_endpoints": "{{ etcd_access_endpoint }}", "etcd_cert_file": "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem", From eeffbbb43ca41784442e15cbb1383a7a1c516db6 Mon Sep 17 00:00:00 2001 From: Brad Beam Date: Thu, 7 Sep 2017 22:21:40 +0000 Subject: [PATCH 56/64] Updating calicocni.hostname to calicocni.nodename --- roles/network_plugin/calico/templates/cni-calico.conf.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/calico/templates/cni-calico.conf.j2 b/roles/network_plugin/calico/templates/cni-calico.conf.j2 index 973c7de53..f49682ea9 100644 --- a/roles/network_plugin/calico/templates/cni-calico.conf.j2 +++ 
b/roles/network_plugin/calico/templates/cni-calico.conf.j2 @@ -1,9 +1,9 @@ { "name": "calico-k8s-network", {% if cloud_provider is defined %} - "hostname": "{{ inventory_hostname }}", + "nodename": "{{ inventory_hostname }}", {% else %} - "hostname": "{{ ansible_hostname }}", + "nodename": "{{ ansible_hostname }}", {% endif %} "type": "calico", "etcd_endpoints": "{{ etcd_access_endpoint }}", From f2057dd43d64301504a6f071c7f41b6628512969 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:32:12 +0300 Subject: [PATCH 57/64] Refactor downloads (#1642) * Refactor downloads Add prefixes to tasks (file vs container) Remove some delegates Clean up some conditions * Update ansible.cfg --- roles/download/tasks/main.yml | 62 +++++++------------ .../download/tasks/set_docker_image_facts.yml | 13 ++-- 2 files changed, 27 insertions(+), 48 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index f9ae253d1..9fa0d7ca8 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -1,12 +1,5 @@ --- -- name: downloading... 
- debug: - msg: "{{ download.url }}" - when: - - download.enabled|bool - - not download.container|bool - -- name: Create dest directories +- name: file_download | Create dest directories file: path: "{{local_release_dir}}/{{download.dest|dirname}}" state: directory @@ -16,7 +9,7 @@ - not download.container|bool tags: bootstrap-os -- name: Download items +- name: file_download | Download item get_url: url: "{{download.url}}" dest: "{{local_release_dir}}/{{download.dest}}" @@ -31,7 +24,7 @@ - download.enabled|bool - not download.container|bool -- name: Extract archives +- name: file_download | Extract archives unarchive: src: "{{ local_release_dir }}/{{download.dest}}" dest: "{{ local_release_dir }}/{{download.dest|dirname}}" @@ -41,10 +34,9 @@ when: - download.enabled|bool - not download.container|bool - - download.unarchive is defined - - download.unarchive == True + - download.unarchive|default(False) -- name: Fix permissions +- name: file_download | Fix permissions file: state: file path: "{{local_release_dir}}/{{download.dest}}" @@ -56,10 +48,11 @@ - (download.unarchive is not defined or download.unarchive == False) - set_fact: - download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" + download_delegate: "{% if download_localhost|bool %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}" + run_once: true tags: facts -- name: Create dest directory for saved/loaded container images +- name: container_download | Create dest directory for saved/loaded container images file: path: "{{local_release_dir}}/containers" state: directory @@ -72,15 +65,14 @@ tags: bootstrap-os # This is required for the download_localhost delegate to work smooth with Container Linux by CoreOS cluster nodes -- name: Hack python binary path for localhost +- name: container_download | Hack python binary path for localhost raw: sh -c "mkdir -p /opt/bin; ln -sf /usr/bin/python /opt/bin/python" - when: download_delegate == 'localhost' 
delegate_to: localhost + when: download_delegate == 'localhost' failed_when: false - run_once: true tags: localhost -- name: Download | create local directory for saved/loaded container images +- name: container_download | create local directory for saved/loaded container images file: path: "{{local_release_dir}}/containers" state: directory @@ -95,24 +87,16 @@ - download_delegate == 'localhost' tags: localhost -- name: Make download decision if pull is required by tag or sha256 +- name: container_download | Make download decision if pull is required by tag or sha256 include: set_docker_image_facts.yml when: - download.enabled|bool - download.container|bool - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" + delegate_to: "{{ download_delegate if download_run_once|bool or omit }}" run_once: "{{ download_run_once|bool }}" tags: facts -- name: pulling... - debug: - msg: "{{ pull_args }}" - when: - - download.enabled|bool - - download.container|bool - -# NOTE(bogdando) this brings no docker-py deps for nodes -- name: Download containers if pull is required or told to always pull +- name: container_download | Download containers if pull is required or told to always pull command: "{{ docker_bin_dir }}/docker pull {{ pull_args }}" register: pull_task_result until: pull_task_result|succeeded @@ -122,29 +106,29 @@ - download.enabled|bool - download.container|bool - pull_required|bool|default(download_always_pull) - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" + delegate_to: "{{ download_delegate if download_run_once|bool or omit }}" run_once: "{{ download_run_once|bool }}" - set_fact: fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar" + run_once: true tags: facts -- name: "Set default value for 'container_changed' to false" +- name: "container_download | Set default value for 
'container_changed' to false" set_fact: container_changed: "{{pull_required|default(false)|bool}}" -- name: "Update the 'container_changed' fact" +- name: "container_download | Update the 'container_changed' fact" set_fact: container_changed: "{{ pull_required|bool|default(false) or not 'up to date' in pull_task_result.stdout }}" when: - download.enabled|bool - download.container|bool - pull_required|bool|default(download_always_pull) - delegate_to: "{{ download_delegate if download_run_once|bool else inventory_hostname }}" run_once: "{{ download_run_once|bool }}" tags: facts -- name: Stat saved container image +- name: container_download | Stat saved container image stat: path: "{{fname}}" register: img @@ -158,7 +142,7 @@ run_once: true tags: facts -- name: Download | save container images +- name: container_download | save container images shell: "{{ docker_bin_dir }}/docker save {{ pull_args }} | gzip -{{ download_compress }} > {{ fname }}" delegate_to: "{{ download_delegate }}" register: saved @@ -170,7 +154,7 @@ - download.container|bool - (container_changed|bool or not img.stat.exists) -- name: Download | copy container images to ansible host +- name: container_download | copy container images to ansible host synchronize: src: "{{ fname }}" dest: "{{ fname }}" @@ -186,7 +170,7 @@ - download.container|bool - saved.changed -- name: Download | upload container images to nodes +- name: container_download | upload container images to nodes synchronize: src: "{{ fname }}" dest: "{{ fname }}" @@ -206,7 +190,7 @@ - download.container|bool tags: [upload, upgrade] -- name: Download | load container images +- name: container_download | load container images shell: "{{ docker_bin_dir }}/docker load < {{ fname }}" when: - (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] and diff --git a/roles/download/tasks/set_docker_image_facts.yml b/roles/download/tasks/set_docker_image_facts.yml index 4ae81d954..832c076b1 100644 --- 
a/roles/download/tasks/set_docker_image_facts.yml +++ b/roles/download/tasks/set_docker_image_facts.yml @@ -9,25 +9,20 @@ - name: Register docker images info raw: >- - {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} .RepoTags {{ '}}' }},{{ '{{' }} .RepoDigests {{ '}}' }}" + {{ docker_bin_dir }}/docker images -q | xargs {{ docker_bin_dir }}/docker inspect -f "{{ '{{' }} (index .RepoTags 0) {{ '}}' }},{{ '{{' }} (index .RepoDigests 0) {{ '}}' }}" | tr '\n' ',' no_log: true - register: docker_images_raw + register: docker_images failed_when: false changed_when: false check_mode: no when: not download_always_pull|bool -- set_fact: - docker_images: "{{docker_images_raw.stdout|regex_replace('\\[|\\]|\\n]','')|regex_replace('\\s',',')}}" - no_log: true - when: not download_always_pull|bool - - set_fact: pull_required: >- - {%- if pull_args in docker_images.split(',') %}false{%- else -%}true{%- endif -%} + {%- if pull_args in docker_images.stdout.split(',') %}false{%- else -%}true{%- endif -%} when: not download_always_pull|bool - name: Check the local digest sha256 corresponds to the given image tag assert: - that: "{{download.repo}}:{{download.tag}} in docker_images.split(',')" + that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')" when: not download_always_pull|bool and not pull_required|bool and pull_by_digest|bool From 9fa1873a65fe078472f580878878ca2981d35563 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:38:03 +0300 Subject: [PATCH 58/64] Add kube dashboard, enabled by default (#1643) * Add kube dashboard, enabled by default Also add rbac role for kube user * Update main.yml --- docs/getting-started.md | 12 ++ inventory/group_vars/k8s-cluster.yml | 5 + .../kubernetes-apps/ansible/defaults/main.yml | 11 ++ .../ansible/tasks/dashboard.yml | 20 ++++ roles/kubernetes-apps/ansible/tasks/main.yml | 5 + .../ansible/templates/dashboard.yml.j2 | 110 ++++++++++++++++++ 6 files 
changed, 163 insertions(+) create mode 100644 roles/kubernetes-apps/ansible/tasks/dashboard.yml create mode 100644 roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 diff --git a/docs/getting-started.md b/docs/getting-started.md index 5494e6f0c..d96ae3ea7 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -89,3 +89,15 @@ file yourself. For more information on kubeconfig and accessing a Kubernetes cluster, refer to the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/). + +Accessing Kubernetes Dashboard +------------------------------ + +If the variable `dashboard_enabled` is set (default is true), then you can +access the Kubernetes Dashboard at the following URL: + + https://kube:_kube-password_@_host_:6443/ui/ + +To see the password, refer to the section above, titled *Connecting to +Kubernetes*. The host can be any kube-master or kube-node or loadbalancer +(when enabled). diff --git a/inventory/group_vars/k8s-cluster.yml b/inventory/group_vars/k8s-cluster.yml index de4642807..fbb1a34e5 100644 --- a/inventory/group_vars/k8s-cluster.yml +++ b/inventory/group_vars/k8s-cluster.yml @@ -45,6 +45,8 @@ kube_users: kube: pass: "{{kube_api_pwd}}" role: admin + groups: + - system:masters ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth) #kube_oidc_auth: false @@ -141,6 +143,9 @@ vault_deployment_type: docker # K8s image pull policy (imagePullPolicy) k8s_image_pull_policy: IfNotPresent +# Kubernetes dashboard (available at http://first_master:6443/ui by default) +dashboard_enabled: true + # Monitoring apps for k8s efk_enabled: false diff --git a/roles/kubernetes-apps/ansible/defaults/main.yml b/roles/kubernetes-apps/ansible/defaults/main.yml index 42c4a027d..3665254da 100644 --- a/roles/kubernetes-apps/ansible/defaults/main.yml +++ b/roles/kubernetes-apps/ansible/defaults/main.yml @@ -38,6 +38,17 @@ 
netchecker_server_memory_limit: 256M netchecker_server_cpu_requests: 50m netchecker_server_memory_requests: 64M +# Dashboard +dashboard_enabled: false +dashboard_image_repo: kubernetesdashboarddev/kubernetes-dashboard-amd64 +dashboard_image_tag: head + +# Limits for dashboard +dashboard_cpu_limit: 100m +dashboard_memory_limit: 256M +dashboard_cpu_requests: 50m +dashboard_memory_requests: 64M + # SSL etcd_cert_dir: "/etc/ssl/etcd/ssl" canal_cert_dir: "/etc/canal/certs" diff --git a/roles/kubernetes-apps/ansible/tasks/dashboard.yml b/roles/kubernetes-apps/ansible/tasks/dashboard.yml new file mode 100644 index 000000000..63ea3cf70 --- /dev/null +++ b/roles/kubernetes-apps/ansible/tasks/dashboard.yml @@ -0,0 +1,20 @@ +--- +- name: Kubernetes Apps | Lay down dashboard template + template: + src: "{{item.file}}" + dest: "{{kube_config_dir}}/{{item.file}}" + with_items: + - {file: dashboard.yml.j2, type: deploy, name: netchecker-agent} + register: manifests + when: inventory_hostname == groups['kube-master'][0] + +- name: Kubernetes Apps | Start dashboard + kube: + name: "{{item.item.name}}" + namespace: "{{system_namespace}}" + kubectl: "{{bin_dir}}/kubectl" + resource: "{{item.item.type}}" + filename: "{{kube_config_dir}}/{{item.item.file}}" + state: "latest" + with_items: "{{ manifests.results }}" + when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 3c986970c..9bea815e7 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -63,3 +63,8 @@ include: tasks/netchecker.yml when: deploy_netchecker tags: netchecker + +- name: Kubernetes Apps | Dashboard + include: tasks/dashboard.yml + when: dashboard_enabled + tags: dashboard diff --git a/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 new file mode 100644 index 000000000..ac32b1c7f --- 
/dev/null +++ b/roles/kubernetes-apps/ansible/templates/dashboard.yml.j2 @@ -0,0 +1,110 @@ +# Copyright 2015 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration to deploy head version of the Dashboard UI compatible with +# Kubernetes 1.6 (RBAC enabled). +# +# Example usage: kubectl create -f + +{% if rbac_enabled %} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard + labels: + k8s-app: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: kubernetes-dashboard + namespace: {{ system_namespace }} +{% endif %} +--- +kind: Deployment +apiVersion: extensions/v1beta1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: {{ dashboard_image_repo }}:{{ dashboard_image_tag }} + # Image is tagged and updated with :head, so always pull it. 
+ imagePullPolicy: Always + resources: + limits: + cpu: {{ dashboard_cpu_limit }} + memory: {{ dashboard_memory_limit }} + requests: + cpu: {{ dashboard_cpu_requests }} + memory: {{ dashboard_memory_requests }} + ports: + - containerPort: 9090 + protocol: TCP + args: + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. + # - --apiserver-host=http://my-address:port + livenessProbe: + httpGet: + path: / + port: 9090 + initialDelaySeconds: 30 + timeoutSeconds: 30 +{% if rbac_enabled %} + serviceAccountName: kubernetes-dashboard +{% endif %} + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule +--- +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: {{ system_namespace }} +spec: + ports: + - port: 80 + targetPort: 9090 + selector: + k8s-app: kubernetes-dashboard + From 649388188b41931622f9a6f393b7a9a540e0591b Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:38:38 +0300 Subject: [PATCH 59/64] Fix netchecker update side effect (#1644) * Fix netchecker update side effect kubectl apply should only be used on resources created with kubectl apply. To workaround this, we should apply the old manifest before upgrading it. 
* Update 030_check-network.yml --- .../ansible/tasks/netchecker.yml | 17 +++++++++++++++++ tests/testcases/030_check-network.yml | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/roles/kubernetes-apps/ansible/tasks/netchecker.yml b/roles/kubernetes-apps/ansible/tasks/netchecker.yml index a74a4dc87..4e91da224 100644 --- a/roles/kubernetes-apps/ansible/tasks/netchecker.yml +++ b/roles/kubernetes-apps/ansible/tasks/netchecker.yml @@ -1,4 +1,21 @@ --- + +- name: Kubernetes Apps | Check if netchecker-server manifest already exists + stat: + path: "{{ kube_config_dir }}/netchecker-server-deployment.yml.j2" + register: netchecker_server_manifest + tags: ['facts', 'upgrade'] + +- name: Kubernetes Apps | Apply netchecker-server manifest to update annotations + kube: + name: "netchecker-server" + namespace: "{{ netcheck_namespace }}" + kubectl: "{{bin_dir}}/kubectl" + resource: "deploy" + state: latest + when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists + tags: upgrade + - name: Kubernetes Apps | Lay Down Netchecker Template template: src: "{{item.file}}" diff --git a/tests/testcases/030_check-network.yml b/tests/testcases/030_check-network.yml index ee5f60785..7c934c592 100644 --- a/tests/testcases/030_check-network.yml +++ b/tests/testcases/030_check-network.yml @@ -16,7 +16,7 @@ shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide" register: get_pods - - debug: msg="{{get_pods.stdout}}" + - debug: msg="{{get_pods.stdout.split('\n')}}" - name: Get pod names shell: "{{bin_dir}}/kubectl get pods -o json" From 5d99fa09404beee664c8ed6cf8f8a86082187792 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:41:20 +0300 Subject: [PATCH 60/64] Purge old upgrade hooks and unused tasks (#1641) --- roles/dnsmasq/tasks/main.yml | 2 - roles/dnsmasq/tasks/pre_upgrade.yml | 9 --- roles/etcd/tasks/install_docker.yml | 19 ----- roles/etcd/tasks/main.yml | 4 - roles/etcd/tasks/pre_upgrade.yml | 60 
-------------- roles/kubernetes/master/tasks/main.yml | 3 - .../kubernetes/master/tasks/post-upgrade.yml | 31 ------- roles/kubernetes/master/tasks/pre-upgrade.yml | 80 +------------------ .../kubernetes/preinstall/tasks/gitinfos.yml | 30 ------- roles/kubernetes/preinstall/tasks/main.yml | 4 - roles/network_plugin/weave/tasks/main.yml | 2 - .../weave/tasks/pre-upgrade.yml | 42 ---------- roles/uploads/defaults/main.yml | 58 -------------- roles/uploads/tasks/main.yml | 27 ------- uploads.yml | 11 --- 15 files changed, 2 insertions(+), 380 deletions(-) delete mode 100644 roles/dnsmasq/tasks/pre_upgrade.yml delete mode 100644 roles/etcd/tasks/pre_upgrade.yml delete mode 100644 roles/kubernetes/master/tasks/post-upgrade.yml delete mode 100644 roles/kubernetes/preinstall/tasks/gitinfos.yml delete mode 100644 roles/network_plugin/weave/tasks/pre-upgrade.yml delete mode 100644 roles/uploads/defaults/main.yml delete mode 100644 roles/uploads/tasks/main.yml delete mode 100644 uploads.yml diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index 607e6df51..4a9031013 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -1,6 +1,4 @@ --- -- include: pre_upgrade.yml - - name: ensure dnsmasq.d directory exists file: path: /etc/dnsmasq.d diff --git a/roles/dnsmasq/tasks/pre_upgrade.yml b/roles/dnsmasq/tasks/pre_upgrade.yml deleted file mode 100644 index 9d1517580..000000000 --- a/roles/dnsmasq/tasks/pre_upgrade.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Delete legacy dnsmasq daemonset - kube: - name: dnsmasq - namespace: "{{system_namespace}}" - kubectl: "{{bin_dir}}/kubectl" - resource: "ds" - state: absent - when: inventory_hostname == groups['kube-master'][0] diff --git a/roles/etcd/tasks/install_docker.yml b/roles/etcd/tasks/install_docker.yml index 76eead2a2..f7589e812 100644 --- a/roles/etcd/tasks/install_docker.yml +++ b/roles/etcd/tasks/install_docker.yml @@ -11,22 +11,3 @@ retries: 4 delay: "{{ retry_stagger | random 
+ 3 }}" changed_when: false - -# Plan B: looks nicer, but requires docker-py on all hosts: -# - name: Install | Set up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: present -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" -# -# - name: Install | Copy etcdctl from etcd-binarycopy container -# command: /usr/bin/docker cp "etcd-binarycopy:{{ etcd_container_bin_dir }}etcdctl" "{{ bin_dir }}/etcdctl" -# when: etcd_deployment_type == "docker" -# -# - name: Install | Clean up etcd-binarycopy container -# docker: -# name: etcd-binarycopy -# state: absent -# image: "{{ etcd_image_repo }}:{{ etcd_image_tag }}" -# when: etcd_deployment_type == "docker" diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index a21016941..3f8403570 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -1,8 +1,4 @@ --- -- include: pre_upgrade.yml - when: etcd_cluster_setup - tags: etcd-pre-upgrade - - include: check_certs.yml when: cert_management == "script" tags: [etcd-secrets, facts] diff --git a/roles/etcd/tasks/pre_upgrade.yml b/roles/etcd/tasks/pre_upgrade.yml deleted file mode 100644 index c08aee621..000000000 --- a/roles/etcd/tasks/pre_upgrade.yml +++ /dev/null @@ -1,60 +0,0 @@ ---- -- name: "Pre-upgrade | check for etcd-proxy unit file" - stat: - path: /etc/systemd/system/etcd-proxy.service - register: etcd_proxy_service_file - tags: facts - -- name: "Pre-upgrade | check for etcd-proxy init script" - stat: - path: /etc/init.d/etcd-proxy - register: etcd_proxy_init_script - tags: facts - -- name: "Pre-upgrade | stop etcd-proxy if service defined" - service: - name: etcd-proxy - state: stopped - when: (etcd_proxy_service_file.stat.exists|default(False) or etcd_proxy_init_script.stat.exists|default(False)) - -- name: "Pre-upgrade | remove etcd-proxy service definition" - file: - path: "{{ item }}" - state: absent - when: (etcd_proxy_service_file.stat.exists|default(False) or 
etcd_proxy_init_script.stat.exists|default(False)) - with_items: - - /etc/systemd/system/etcd-proxy.service - - /etc/init.d/etcd-proxy - -- name: "Pre-upgrade | find etcd-proxy container" - command: "{{ docker_bin_dir }}/docker ps -aq --filter 'name=etcd-proxy*'" - register: etcd_proxy_container - changed_when: false - failed_when: false - -- name: "Pre-upgrade | remove etcd-proxy if it exists" - command: "{{ docker_bin_dir }}/docker rm -f {{item}}" - with_items: "{{etcd_proxy_container.stdout_lines|default()}}" - -- name: "Pre-upgrade | see if etcdctl is installed" - stat: - path: "{{ bin_dir }}/etcdctl" - register: etcdctl_installed - -- name: "Pre-upgrade | check if member list is non-SSL" - command: "{{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list" - register: etcd_member_list - retries: 10 - delay: 3 - until: etcd_member_list.rc != 2 - run_once: true - when: etcdctl_installed.stat.exists - changed_when: false - failed_when: false - -- name: "Pre-upgrade | change peer names to SSL" - shell: >- - {{ bin_dir }}/etcdctl --no-sync --peers={{ etcd_access_addresses | regex_replace('https','http') }} member list | - awk -F"[: =]" '{print "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | regex_replace('https','http') }} member update "$1" https:"$7":"$8}' | bash - run_once: true - when: 'etcdctl_installed.stat.exists and etcd_member_list.rc == 0 and "http://" in etcd_member_list.stdout' diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 24a3a495a..452463118 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -85,6 +85,3 @@ dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest" notify: Master | wait for kube-controller-manager tags: kube-controller-manager - -- include: post-upgrade.yml - tags: k8s-post-upgrade diff --git a/roles/kubernetes/master/tasks/post-upgrade.yml 
b/roles/kubernetes/master/tasks/post-upgrade.yml deleted file mode 100644 index 221bf542d..000000000 --- a/roles/kubernetes/master/tasks/post-upgrade.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: "Post-upgrade | stop kubelet on all masters" - service: - name: kubelet - state: stopped - delegate_to: "{{item}}" - with_items: "{{groups['kube-master']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Post-upgrade | Pause for kubelet stop" - pause: - seconds: 10 - when: needs_etcd_migration|bool - -- name: "Post-upgrade | start kubelet on all masters" - service: - name: kubelet - state: started - delegate_to: "{{item}}" - with_items: "{{groups['kube-master']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Post-upgrade | etcd3 upgrade | purge etcd2 k8s data" - command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} rm -r /registry" - environment: - ETCDCTL_API: 2 - delegate_to: "{{groups['etcd'][0]}}" - run_once: true - when: kube_apiserver_storage_backend == "etcd3" and needs_etcd_migration|bool|default(false) diff --git a/roles/kubernetes/master/tasks/pre-upgrade.yml b/roles/kubernetes/master/tasks/pre-upgrade.yml index 604659279..7cd650cbd 100644 --- a/roles/kubernetes/master/tasks/pre-upgrade.yml +++ b/roles/kubernetes/master/tasks/pre-upgrade.yml @@ -1,38 +1,4 @@ --- -- name: "Pre-upgrade | check for kube-apiserver unit file" - stat: - path: /etc/systemd/system/kube-apiserver.service - register: kube_apiserver_service_file - tags: [facts, kube-apiserver] - -- name: "Pre-upgrade | check for kube-apiserver init script" - stat: - path: /etc/init.d/kube-apiserver - register: kube_apiserver_init_script - tags: [facts, kube-apiserver] - -- name: "Pre-upgrade | stop kube-apiserver if service defined" - service: - name: kube-apiserver - state: stopped - when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False)) - tags: kube-apiserver - -- name: "Pre-upgrade | 
remove kube-apiserver service definition" - file: - path: "{{ item }}" - state: absent - when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False)) - with_items: - - /etc/systemd/system/kube-apiserver.service - - /etc/init.d/kube-apiserver - tags: kube-apiserver - -- name: "Pre-upgrade | See if kube-apiserver manifest exists" - stat: - path: /etc/kubernetes/manifests/kube-apiserver.manifest - register: kube_apiserver_manifest - - name: "Pre-upgrade | etcd3 upgrade | see if old config exists" command: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} ls /registry/minions" environment: @@ -47,19 +13,6 @@ kube_apiserver_storage_backend: "etcd2" when: old_data_exists.rc == 0 and not force_etcd3|bool -- name: "Pre-upgrade | etcd3 upgrade | see if data was already migrated" - command: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} get --limit=1 --prefix=true /registry/minions" - environment: - ETCDCTL_API: 3 - register: data_migrated - delegate_to: "{{groups['etcd'][0]}}" - when: kube_apiserver_storage_backend == "etcd3" - failed_when: false - -- name: "Pre-upgrade | etcd3 upgrade | set needs_etcd_migration" - set_fact: - needs_etcd_migration: "{{ force_etcd3|default(false) and kube_apiserver_storage_backend == 'etcd3' and data_migrated.stdout_lines|length == 0 and old_data_exists.rc == 0 }}" - - name: "Pre-upgrade | Delete master manifests on all kube-masters" file: path: "/etc/kubernetes/manifests/{{item[1]}}.manifest" @@ -69,7 +22,7 @@ - "{{groups['kube-master']}}" - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] register: kube_apiserver_manifest_replaced - when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists + when: (secret_changed|default(false) or etcd_secret_changed|default(false)) - name: "Pre-upgrade | Delete master containers forcefully on all kube-masters" shell: 
"docker ps -f name=k8s-{{item}}* -q | xargs --no-run-if-empty docker rm -f" @@ -77,34 +30,5 @@ with_nested: - "{{groups['kube-master']}}" - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"] - register: kube_apiserver_manifest_replaced - when: (secret_changed|default(false) or etcd_secret_changed|default(false) or needs_etcd_migration|bool) and kube_apiserver_manifest.stat.exists - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | stop etcd" - service: - name: etcd - state: stopped - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - when: needs_etcd_migration|bool - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | migrate data" - command: "{{ bin_dir }}/etcdctl migrate --data-dir=\"{{ etcd_data_dir }}\" --wal-dir=\"{{ etcd_data_dir }}/member/wal\"" - environment: - ETCDCTL_API: 3 - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - register: etcd_migrated - when: needs_etcd_migration|bool - run_once: true - -- name: "Pre-upgrade | etcd3 upgrade | start etcd" - service: - name: etcd - state: started - delegate_to: "{{item}}" - with_items: "{{groups['etcd']}}" - when: needs_etcd_migration|bool + when: kube_apiserver_manifest_replaced.changed run_once: true diff --git a/roles/kubernetes/preinstall/tasks/gitinfos.yml b/roles/kubernetes/preinstall/tasks/gitinfos.yml deleted file mode 100644 index 323c0babf..000000000 --- a/roles/kubernetes/preinstall/tasks/gitinfos.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# Deploy git infos -# ---------------- -- name: 'GIT | Install script for collecting git info' - template: - src: "{{ role_path }}/gen-gitinfos.sh" - dest: "{{ bin_dir }}/gen-gitinfos.sh" - mode: a+rwx - -- name: 'GIT | generate git informations' - local_action: command {{ role_path }}/gen-gitinfos.sh global - register: gitinfo - check_mode: no - -- name: 'GIT | copy ansible information' - template: - src: ansible_git.j2 - dest: /etc/.ansible.ini - backup: yes - -- name: 'GIT | generate diff file' - local_action: command 
{{ role_path }}/gen-gitinfos.sh diff - register: gitdiff - check_mode: no - -- name: 'GIT | copy git diff file' - copy: - content: "{{ gitdiff.stdout }}" - dest: /etc/.git-ansible.diff - backup: yes diff --git a/roles/kubernetes/preinstall/tasks/main.yml b/roles/kubernetes/preinstall/tasks/main.yml index 65716816e..620aae35f 100644 --- a/roles/kubernetes/preinstall/tasks/main.yml +++ b/roles/kubernetes/preinstall/tasks/main.yml @@ -16,10 +16,6 @@ become: true tags: bootstrap-os -- include: gitinfos.yml - when: run_gitinfos - tags: facts - - include: set_facts.yml tags: facts diff --git a/roles/network_plugin/weave/tasks/main.yml b/roles/network_plugin/weave/tasks/main.yml index 38895ab40..462278e94 100644 --- a/roles/network_plugin/weave/tasks/main.yml +++ b/roles/network_plugin/weave/tasks/main.yml @@ -1,6 +1,4 @@ --- -- include: pre-upgrade.yml - - include: seed.yml when: weave_mode_seed diff --git a/roles/network_plugin/weave/tasks/pre-upgrade.yml b/roles/network_plugin/weave/tasks/pre-upgrade.yml deleted file mode 100644 index bcf3c2af2..000000000 --- a/roles/network_plugin/weave/tasks/pre-upgrade.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -- name: Weave pre-upgrade | Stop legacy weave - command: weave stop - failed_when: false - -- name: Weave pre-upgrade | Stop legacy systemd weave services - service: - name: "{{ item }}" - enabled: no - state: stopped - with_items: - - weaveexpose - - weaveproxy - - weave - failed_when: false - -- name: Weave pre-upgrade | Purge legacy systemd weave systemd unit files - file: - path: "{{ item }}" - state: absent - register: purged_weave_systemd_units - with_items: - - "/etc/systemd/system/weaveexpose.service" - - "/etc/systemd/system/weaveproxy.service" - - "/etc/systemd/system/weave.service" - -- name: Weave pre-upgrade | Reload systemd - command: systemctl daemon-reload - when: ansible_service_mgr == "systemd" and purged_weave_systemd_units.changed - -- name: Weave pre-upgrade | Purge legacy weave configs and binary - file: - 
path: "{{ item }}" - state: absent - with_items: - - "{{ bin_dir }}/weave" - - "/etc/weave.env" - -- name: Weave pre-upgrade | Purge legacy weave docker containers - shell: "docker ps -af 'name=^/weave.*' -q | xargs --no-run-if-empty docker rm -f" - retries: 3 - failed_when: false diff --git a/roles/uploads/defaults/main.yml b/roles/uploads/defaults/main.yml deleted file mode 100644 index b3df52d5e..000000000 --- a/roles/uploads/defaults/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -local_release_dir: /tmp - -# Versions -etcd_version: v3.0.17 -calico_version: v2.5.0 -calico_cni_version: v1.10.0 -weave_version: v2.0.1 - -# Download URL's -etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz" -calico_cni_download_url: "https://github.com/projectcalico/calico-cni/releases/download/{{calico_cni_version}}/calico" -calico_cni_ipam_download_url: "https://github.com/projectcalico/calico-cni/releases/download/{{calico_cni_version}}/calico-ipam" -weave_download_url: "https://github.com/weaveworks/weave/releases/download/{{weave_version}}/weave" - -# Checksums -calico_cni_checksum: "c72abd0d7ee88376952e43999bcbfa7958171708108bd3f1087c599115350b46" -calico_cni_ipam_checksum: "280fdb1d80f11904adc11760a9a5f3ae29b2aaf911ff0163a8da25646e757413" -weave_checksum: "311f5fe25036c774c3ea9975e033f67e1f3c5afbe8b5693a1d36d51c94ac31c4" -etcd_checksum: "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b" - -downloads: - - name: calico-cni-plugin - dest: calico/bin/calico - version: "{{calico_cni_version}}" - sha256: "{{ calico_cni_checksum }}" - source_url: "{{ calico_cni_download_url }}" - url: "{{ calico_cni_download_url }}" - owner: "root" - mode: "0755" - - - name: calico-cni-plugin-ipam - dest: calico/bin/calico-ipam - version: "{{calico_cni_version}}" - sha256: "{{ calico_cni_ipam_checksum }}" - source_url: "{{ calico_cni_ipam_download_url }}" - url: "{{ calico_cni_ipam_download_url }}" - 
owner: "root" - mode: "0755" - - - name: weave - dest: weave/bin/weave - version: "{{weave_version}}" - source_url: "{{weave_download_url}}" - url: "{{weave_download_url}}" - sha256: "{{ weave_checksum }}" - owner: "root" - mode: "0755" - - - name: etcd - version: "{{etcd_version}}" - dest: "etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz" - sha256: "{{ etcd_checksum }}" - source_url: "{{ etcd_download_url }}" - url: "{{ etcd_download_url }}" - unarchive: true - owner: "etcd" - mode: "0755" diff --git a/roles/uploads/tasks/main.yml b/roles/uploads/tasks/main.yml deleted file mode 100644 index a770020c2..000000000 --- a/roles/uploads/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- -- name: Create dest directories - file: - path: "{{local_release_dir}}/{{item.dest|dirname}}" - state: directory - recurse: yes - with_items: '{{downloads}}' - -- name: Download items - get_url: - url: "{{item.source_url}}" - dest: "{{local_release_dir}}/{{item.dest}}" - sha256sum: "{{item.sha256 | default(omit)}}" - owner: "{{ item.owner|default(omit) }}" - mode: "{{ item.mode|default(omit) }}" - with_items: '{{downloads}}' - -- name: uploads items - gc_storage: - bucket: kargo - object: "{{item.version}}_{{item.name}}" - src: "{{ local_release_dir }}/{{item.dest}}" - mode: put - permission: public-read - gs_access_key: 'changeme' - gs_secret_key: 'changeme' - with_items: '{{downloads}}' diff --git a/uploads.yml b/uploads.yml deleted file mode 100644 index 5544f4588..000000000 --- a/uploads.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- hosts: localhost - roles: - - {role: uploads} - -# TEST download -- hosts: localhost - vars: - local_release_dir: /tmp/from_gcloud - roles: - - {role: download} \ No newline at end of file From 0f231f0e76cfe9c2baf7979ed85eb875e051e236 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:41:31 +0300 Subject: [PATCH 61/64] Improve method to create and wait for gce instances (#1645) --- tests/cloud_playbooks/create-gce.yml | 14 
+++++++++++--- tests/testcases/010_check-apiserver.yml | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/tests/cloud_playbooks/create-gce.yml b/tests/cloud_playbooks/create-gce.yml index 1a82c50d7..afa757719 100644 --- a/tests/cloud_playbooks/create-gce.yml +++ b/tests/cloud_playbooks/create-gce.yml @@ -34,6 +34,10 @@ tags: "build-{{test_name}},{{kube_network_plugin}}" register: gce + - name: Add instances to host group + add_host: hostname={{item.name}} ansible_host={{item.public_ip}} groupname="waitfor_hosts" + with_items: '{{gce.instance_data}}' + - name: Template the inventory template: src: ../templates/inventory-gce.j2 @@ -51,6 +55,10 @@ dest: "{{ inventory_path|dirname }}/group_vars/fake_hosts.yml" when: mode in ['scale', 'separate-scale', 'ha-scale'] - - name: Wait for SSH to come up - wait_for: host={{item.public_ip}} port=22 delay=30 timeout=180 state=started - with_items: "{{gce.instance_data}}" + +- name: Wait for instances + hosts: "waitfor_hosts" + gather_facts: false + tasks: + - name: Wait for SSH to come up. 
+ local_action: wait_for host={{inventory_hostname}} port=22 delay=5 timeout=240 state=started diff --git a/tests/testcases/010_check-apiserver.yml b/tests/testcases/010_check-apiserver.yml index 82c8b23d4..5b053fd4b 100644 --- a/tests/testcases/010_check-apiserver.yml +++ b/tests/testcases/010_check-apiserver.yml @@ -6,6 +6,6 @@ uri: url: "https://{{ access_ip | default(ansible_default_ipv4.address) }}:{{ kube_apiserver_port }}/api/v1" user: kube - password: "{{ lookup('password', '../../credentials/kube_user length=15') }}" + password: "{{ lookup('password', '../../credentials/kube_user length=15 chars=ascii_letters,digits') }}" validate_certs: no status_code: 200 From 75b13caf0bbf6e930c9c8003975470268211fe2e Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Sat, 9 Sep 2017 23:41:48 +0300 Subject: [PATCH 62/64] Fix kube-apiserver status checks when changing insecure bind addr (#1633) --- roles/kubernetes-apps/ansible/tasks/main.yml | 2 +- roles/kubernetes/master/handlers/main.yml | 2 +- roles/kubernetes/preinstall/tasks/set_facts.yml | 4 ++++ roles/kubespray-defaults/defaults/main.yaml | 1 + 4 files changed, 7 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes-apps/ansible/tasks/main.yml b/roles/kubernetes-apps/ansible/tasks/main.yml index 9bea815e7..c2ffd7507 100644 --- a/roles/kubernetes-apps/ansible/tasks/main.yml +++ b/roles/kubernetes-apps/ansible/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Kubernetes Apps | Wait for kube-apiserver uri: - url: http://localhost:{{ kube_apiserver_insecure_port }}/healthz + url: "{{ kube_apiserver_insecure_endpoint }}/healthz" register: result until: result.status == 200 retries: 10 diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index e408ce04e..d6034aeb2 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -39,7 +39,7 @@ - name: Master | wait for the apiserver to be running uri: - url: http://localhost:{{ 
kube_apiserver_insecure_port }}/healthz + url: "{{ kube_apiserver_insecure_endpoint }}/healthz" register: result until: result.status == 200 retries: 20 diff --git a/roles/kubernetes/preinstall/tasks/set_facts.yml b/roles/kubernetes/preinstall/tasks/set_facts.yml index 056f9edcf..96ec25499 100644 --- a/roles/kubernetes/preinstall/tasks/set_facts.yml +++ b/roles/kubernetes/preinstall/tasks/set_facts.yml @@ -32,6 +32,10 @@ {%- endif -%} {%- endif %} +- set_fact: + kube_apiserver_insecure_endpoint: >- + http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }} + - set_fact: etcd_address: "{{ ip | default(ansible_default_ipv4['address']) }}" diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 84fc0ee74..e6015560a 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -92,6 +92,7 @@ kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('ad # https kube_apiserver_port: 6443 # http +kube_apiserver_insecure_bind_address: 127.0.0.1 kube_apiserver_insecure_port: 8080 # Path used to store Docker data From e8bde03a50fc87c887135ea52083fab8050b2fbc Mon Sep 17 00:00:00 2001 From: Seungkyu Ahn Date: Sun, 10 Sep 2017 05:54:13 +0900 Subject: [PATCH 63/64] Setting kubectl bin directory (#1635) --- roles/upgrade/pre-upgrade/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/upgrade/pre-upgrade/tasks/main.yml b/roles/upgrade/pre-upgrade/tasks/main.yml index decc9d05b..e4dbe569b 100644 --- a/roles/upgrade/pre-upgrade/tasks/main.yml +++ b/roles/upgrade/pre-upgrade/tasks/main.yml @@ -1,6 +1,6 @@ --- - name: See if node is in ready state - shell: "kubectl get nodes | grep {{ inventory_hostname }}" + shell: "{{ bin_dir }}/kubectl get nodes | grep {{ inventory_hostname }}" register: kubectl_nodes delegate_to: "{{ groups['kube-master'][0] }}" failed_when: 
false From 943aaf84e52c5d2a46e156be2bb43cf30cc1e640 Mon Sep 17 00:00:00 2001 From: Matthew Mosesohn Date: Mon, 11 Sep 2017 12:47:04 +0300 Subject: [PATCH 64/64] Update getting-started.md --- docs/getting-started.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting-started.md b/docs/getting-started.md index d96ae3ea7..65b590a2f 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -81,7 +81,7 @@ Kubespray permits connecting to the cluster remotely on any IP of any kube-master host on port 6443 by default. However, this requires authentication. One could generate a kubeconfig based on one installed kube-master hosts (needs improvement) or connect with a username and password. -By default, two users are created: `kube` and `admin` with the same password. +By default, a user with admin rights is created, named `kube`. The password can be viewed after deployment by looking at the file `PATH_TO_KUBESPRAY/credentials/kube_user`. This contains a randomly generated password. If you wish to set your own password, just precreate/modify this