Remove non-kubeadm deployment (#3811)

* Remove non-kubeadm deployment

* More cleanup

* More cleanup

* More cleanup

* More cleanup

* Fix gitlab

* Try stopping GCE first before absent to make the delete process work

* More cleanup

* Fix bug with checking if kubeadm has already run

* Fix bug with checking if kubeadm has already run

* More fixes

* Fix test

* fix

* Fix gitlab checkout until kubespray 2.8 is on quay

* Fixed

* Add upgrade path from non-kubeadm to kubeadm. Revert ssl path

* Re-add secret checking

* Do gitlab checks from v2.7.0 to test the upgrade path to 2.8.0

* fix typo

* Fix CI jobs to use kubeadm again. Fix broken hyperkube path

* Fix gitlab

* Fix rotate tokens

* More fixes

* More fixes

* Fix tokens
Andreas Krüger 2018-12-06 11:33:38 +01:00 committed by Kubernetes Prow Robot
parent 0d1be39a97
commit ddffdb63bf
65 changed files with 111 additions and 2042 deletions

View file

@@ -24,7 +24,6 @@ variables:
   IDEMPOT_CHECK: "false"
   RESET_CHECK: "false"
   UPGRADE_TEST: "false"
-  KUBEADM_ENABLED: "false"
   LOG_LEVEL: "-vv"
 # asia-east1-a
@@ -89,11 +88,11 @@ before_script:
   - echo ${PWD}
   - echo "${STARTUP_SCRIPT}"
   - cd tests && make create-${CI_PLATFORM} -s ; cd -
-  #- git fetch --all && git checkout v2.7.0
   # Check out latest tag if testing upgrade
   # Uncomment when gitlab kubespray repo has tags
-  #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
+  - test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
+  - test "${UPGRADE_TEST}" != "false" && git checkout 53d87e53c5899d4ea2904ab7e3883708dd6363d3
   # Checkout the CI vars file so it is available
   - test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
   # Workaround https://github.com/kubernetes-sigs/kubespray/issues/2021
@@ -137,9 +136,7 @@
   # Test Cases
   ## Test Master API
-  - >
-    ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
-    -e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
+  - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
   ## Ping between 2 pods
   - ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
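The upgrade jobs rely on the checkout dance above: deploy from the older checkout first, then return to the PR head and upgrade in place. A minimal sketch of such a job (the job name, stage, and exact playbook split are illustrative, not taken from this file):

```
upgrade-test-sketch:
  stage: deploy-gce
  variables:
    UPGRADE_TEST: "basic"
  script:
    # before_script has already switched the tree to the previous release
    - ansible-playbook -i ${ANSIBLE_INVENTORY} cluster.yml $LOG_LEVEL
    # switch back to the commit under test and upgrade in place
    - git checkout "${CI_BUILD_REF}"
    - ansible-playbook -i ${ANSIBLE_INVENTORY} upgrade-cluster.yml $LOG_LEVEL
```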

View file

@@ -13,20 +13,6 @@
   vars:
     ansible_connection: local

-- hosts: localhost
-  gather_facts: false
-  tasks:
-    - name: deploy warning for non kubeadm
-      debug:
-        msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-    - name: deploy cluster for non kubeadm
-      pause:
-        prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
-        echo: no
-      when: not kubeadm_enabled and not skip_non_kubeadm_warning
-
 - hosts: bastion[0]
   gather_facts: False
   roles:
@@ -96,7 +82,7 @@
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
-    - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
+    - { role: kubernetes/kubeadm, tags: kubeadm}
     - { role: network_plugin, tags: network }

 - hosts: kube-master[0]
@@ -104,7 +90,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
-    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"], when: "kubeadm_enabled" }
+    - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"]}

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

View file

@@ -1,7 +1,6 @@
 # kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
 # See contrib/dind/README.md
 kube_api_anonymous_auth: true
-kubeadm_enabled: true
 kubelet_fail_swap_on: false

View file

@@ -1,8 +1,6 @@
 DISTROS=(debian centos)
 NETCHECKER_HOST=${NODES[0]}
 EXTRAS=(
-  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":false}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":true,"kube_router_run_service_proxy":true}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":false}'
-  'kube_network_plugin=kube-router {"kubeadm_enabled":false,"kube_router_run_service_proxy":true}'
+  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":false}'
+  'kube_network_plugin=kube-router {"kube_router_run_service_proxy":true}'
 )

View file

@@ -1,8 +1,8 @@
 DISTROS=(debian centos)
 EXTRAS=(
-  'kube_network_plugin=calico {"kubeadm_enabled":true}'
-  'kube_network_plugin=canal {"kubeadm_enabled":true}'
-  'kube_network_plugin=cilium {"kubeadm_enabled":true}'
-  'kube_network_plugin=flannel {"kubeadm_enabled":true}'
-  'kube_network_plugin=weave {"kubeadm_enabled":true}'
+  'kube_network_plugin=calico {}'
+  'kube_network_plugin=canal {}'
+  'kube_network_plugin=cilium {}'
+  'kube_network_plugin=flannel {}'
+  'kube_network_plugin=weave {}'
 )

View file

@@ -15,8 +15,6 @@ Use cri-o instead of docker, set following variable:
 #### all.yml
 ```
-kubeadm_enabled: true
-...
 download_container: false
 skip_downloads: false
 ```
@@ -28,4 +26,3 @@ etcd_deployment_type: host
 kubelet_deployment_type: host
 container_manager: crio
 ```

View file

@@ -62,34 +62,6 @@ You can change the default configuration by overriding `kube_router_...` variables;
 these are named to follow `kube-router` command-line options as per
 <https://www.kube-router.io/docs/user-guide/#try-kube-router-with-cluster-installers>.

-## Caveats
-
-### kubeadm_enabled: true
-
-If you want to set `kube-router` to replace `kube-proxy`
-(`--run-service-proxy=true`) while using `kubeadm_enabled`,
-then the `kube-proxy` DaemonSet will be removed *after* kubeadm finishes
-running, as it's not possible to skip the kube-proxy install in kubeadm flags
-and/or config, see https://github.com/kubernetes/kubeadm/issues/776.
-
-Given the above, if `--run-service-proxy=true` is needed it would be
-better to avoid `kubeadm_enabled`, i.e. set:
-
-```
-kubeadm_enabled: false
-kube_router_run_service_proxy: true
-```
-
-If for some reason you do want/need to set `kubeadm_enabled`, removing
-kube-proxy afterwards behaves better if it is set to ipvs mode, i.e. set:
-
-```
-kubeadm_enabled: true
-kube_router_run_service_proxy: true
-kube_proxy_mode: ipvs
-```
-
 ## Advanced BGP Capabilities

 https://github.com/cloudnativelabs/kube-router#advanced-bgp-capabilities
@@ -105,4 +77,4 @@ Next options will set up annotations for kube-router, using `kubectl annotate` command.
 kube_router_annotations_master: []
 kube_router_annotations_node: []
 kube_router_annotations_all: []
 ```
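With the caveat section gone, replacing kube-proxy with kube-router is configured through plain group_vars; a minimal sketch mirroring the remaining docs and the DIND test matrix above (ipvs mode was the removed caveat's recommendation for when kube-proxy is removed after kubeadm runs):

```
kube_network_plugin: kube-router
kube_router_run_service_proxy: true
kube_proxy_mode: ipvs
```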

View file

@@ -20,9 +20,6 @@ Some variables of note include:
   string)
 * *etcd_version* - Specify version of ETCD to use
 * *ipip* - Enables Calico ipip encapsulation by default
-* *hyperkube_image_repo* - Specify the Docker repository where Hyperkube
-  resides
-* *hyperkube_image_tag* - Specify the Docker tag where Hyperkube resides
 * *kube_network_plugin* - Sets k8s network plugin (default Calico)
 * *kube_proxy_mode* - Changes k8s proxy mode to iptables mode
 * *kube_version* - Specify a given Kubernetes hyperkube version

View file

@@ -42,16 +42,10 @@ bin_dir: /usr/local/bin
 ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
 ## When openstack is used make sure to source in the openstack credentials
 ## like you would do when using nova-client before starting the playbook.
 ## Note: The 'external' cloud provider is not supported.
 ## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
 #cloud_provider:

-## kubeadm deployment mode
-kubeadm_enabled: true
-
-# Skip alert information
-skip_non_kubeadm_warning: false
-
 ## Set these proxy values in order to update package manager and docker daemon to use proxies
 #http_proxy: ""
 #https_proxy: ""

View file

@@ -136,8 +136,6 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "{{ kube_image_repo }}/hyperkube-{{ image_arch }}"
-hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
@@ -272,7 +270,7 @@ downloads:
       - k8s-cluster
   kubeadm:
-    enabled: "{{ kubeadm_enabled }}"
+    enabled: true
     file: true
     version: "{{ kubeadm_version }}"
     dest: "{{local_release_dir}}/kubeadm"
@@ -284,20 +282,11 @@
     groups:
       - k8s-cluster
-  hyperkube:
-    enabled: "{{ kubeadm_enabled == false }}"
-    container: true
-    repo: "{{ hyperkube_image_repo }}"
-    tag: "{{ hyperkube_image_tag }}"
-    sha256: "{{ hyperkube_digest_checksum|default(None) }}"
-    groups:
-      - k8s-cluster
   hyperkube_file:
     enabled: true
     file: true
     version: "{{ kube_version }}"
-    dest: "{{local_release_dir}}/hyperkube"
+    dest: "{{ local_release_dir }}/hyperkube"
     sha256: "{{ hyperkube_binary_checksum }}"
     url: "{{ hyperkube_download_url }}"
     unarchive: false

View file

@@ -21,7 +21,6 @@
     resource: "deploy"
     state: absent
   when:
-    - kubeadm_enabled|default(false)
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]
@@ -50,7 +49,6 @@
     - 'deploy'
     - 'svc'
   when:
-    - kubeadm_enabled|default(false)
     - kubeadm_init is defined
     - kubeadm_init.changed|default(false)
     - inventory_hostname == groups['kube-master'][0]

View file

@@ -1,6 +1,6 @@
 ---
 - name: Rotate Tokens | Get default token name
-  shell: "{{ bin_dir }}/kubectl get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
+  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets -o custom-columns=name:{.metadata.name} --no-headers | grep -m1 default-token"
   register: default_token
   changed_when: false
   until: default_token.rc == 0
@@ -8,7 +8,7 @@
   retries: 5

 - name: Rotate Tokens | Get default token data
-  command: "{{ bin_dir }}/kubectl get secrets {{ default_token.stdout }} -ojson"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets {{ default_token.stdout }} -ojson"
   register: default_token_data
   changed_when: false
@@ -31,7 +31,7 @@
 # instead of filtering manually
 - name: Rotate Tokens | Get all serviceaccount tokens to expire
   shell: >-
-    {{ bin_dir }}/kubectl get secrets --all-namespaces
+    {{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf get secrets --all-namespaces
     -o 'jsonpath={range .items[*]}{"\n"}{.metadata.namespace}{" "}{.metadata.name}{" "}{.type}{end}'
     | grep kubernetes.io/service-account-token
     | egrep 'default-token|kube-proxy|kube-dns|dnsmasq|netchecker|weave|calico|canal|flannel|dashboard|cluster-proportional-autoscaler|tiller|local-volume-provisioner'
@@ -39,10 +39,10 @@
   when: needs_rotation

 - name: Rotate Tokens | Delete expired tokens
-  command: "{{ bin_dir }}/kubectl delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete secrets -n {{ item.split(' ')[0] }} {{ item.split(' ')[1] }}"
   with_items: "{{ tokens_to_delete.stdout_lines }}"
   when: needs_rotation

 - name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
+  command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete pods -n kube-system --all"
   when: needs_rotation

View file

@@ -10,25 +10,6 @@
   tags:
     - facts

-- name: Gather certs for admin kubeconfig
-  slurp:
-    src: "{{ item }}"
-  register: admin_certs
-  with_items:
-    - "{{ kube_cert_dir }}/ca.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}.pem"
-    - "{{ kube_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
-  when: not kubeadm_enabled|d(false)|bool
-
-- name: Write admin kubeconfig
-  template:
-    src: admin.conf.j2
-    dest: "{{ kube_config_dir }}/admin.conf"
-    owner: root
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when: not kubeadm_enabled|d(false)|bool
-
 - name: Create kube config dir
   file:
     path: "/root/.kube"

View file

@@ -124,7 +124,7 @@
 # FIXME(jjo): need to post-remove kube-proxy until https://github.com/kubernetes/kubeadm/issues/776
 # is fixed
 - name: Delete kube-proxy daemonset if kube_proxy_remove set, e.g. kube_network_plugin providing proxy services
-  shell: "{{ bin_dir }}/kubectl delete daemonset -n kube-system kube-proxy"
+  shell: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf delete daemonset -n kube-system kube-proxy"
   delegate_to: "{{groups['kube-master']|first}}"
   run_once: true
   when:

View file

@@ -91,13 +91,16 @@
     command: /bin/true
   notify:
     - Master | set secret_changed to true
-    - Master | clear kubeconfig for root user
+    - Master | Copy new kubeconfig for root user

 - name: Master | set secret_changed to true
   set_fact:
     secret_changed: true

-- name: Master | clear kubeconfig for root user
-  file:
-    path: /root/.kube/config
-    state: absent
+- name: Master | Copy new kubeconfig for root user
+  copy:
+    src: "{{ kube_config_dir }}/admin.conf"
+    dest: "/root/.kube/config"
+    remote_src: yes
+    mode: "0600"
+    backup: yes

View file

@@ -1,7 +1,7 @@
 ---
 - name: kubeadm | Retrieve files to purge
   find:
-    paths: "{{kube_cert_dir }}"
+    paths: "{{ kube_cert_dir }}"
     patterns: '*.pem'
   register: files_to_purge_for_kubeadm

View file

@@ -26,19 +26,22 @@
   file:
     path: "{{ kube_config_dir }}/admin.conf"
     state: absent
-  when: not kubeadm_already_run.stat.exists
+  when:
+    - not kubeadm_already_run.stat.exists

 - name: kubeadm | Delete old static pods
   file:
     path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
     state: absent
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists

 - name: kubeadm | Forcefully delete old static pods
   shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
   with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists

 - name: kubeadm | aggregate all SANs
   set_fact:
@@ -220,7 +223,8 @@
 - name: kubeadm | cleanup old certs if necessary
   import_tasks: kubeadm-cleanup-old-certs.yml
-  when: old_apiserver_cert.stat.exists
+  when:
+    - old_apiserver_cert.stat.exists

 - name: kubeadm | Remove taint for master with node role
   command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
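The single-string `when:` conditions above become list form; Ansible ANDs the entries of a `when:` list, so behaviour is unchanged while extra conditions can be appended as new items. A small illustration (hypothetical task):

```
- name: Example | list-form when, entries are ANDed
  debug:
    msg: "runs only on the first master after an old apiserver cert was found"
  when:
    - old_apiserver_cert.stat.exists
    - inventory_hostname == groups['kube-master'][0]
```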

View file

@@ -4,12 +4,14 @@
     - k8s-pre-upgrade

 - import_tasks: users-file.yml
-  when: kube_basic_auth|default(true)
+  when:
+    - kube_basic_auth|default(true)

 - import_tasks: encrypt-at-rest.yml
-  when: kube_encrypt_secret_data
+  when:
+    - kube_encrypt_secret_data

-- name: install | Copy kubectl binary from download dir
+- name: Install | Copy kubectl binary from download dir
   synchronize:
     src: "{{ local_release_dir }}/hyperkube"
     dest: "{{ bin_dir }}/kubectl"
@@ -57,10 +59,5 @@
     kube_apiserver_enable_admission_plugins: "{{ kube_apiserver_enable_admission_plugins | difference(['SecurityContextDeny']) | union(['PodSecurityPolicy']) | unique }}"
   when: podsecuritypolicy_enabled

-- name: Include kubeadm setup if enabled
+- name: Include kubeadm setup
   import_tasks: kubeadm-setup.yml
-  when: kubeadm_enabled|bool|default(false)
-
-- name: Include static pod setup if not using kubeadm
-  import_tasks: static-pod-setup.yml
-  when: not kubeadm_enabled|bool|default(false)

View file

@@ -1,59 +0,0 @@
---
- name: Create audit-policy directory
  file:
    path: "{{ audit_policy_file | dirname }}"
    state: directory
  tags:
    - kube-apiserver
  when: kubernetes_audit|default(false)

- name: Write api audit policy yaml
  template:
    src: apiserver-audit-policy.yaml.j2
    dest: "{{ audit_policy_file }}"
  notify: Master | Restart apiserver
  tags:
    - kube-apiserver
  when: kubernetes_audit|default(false)

- name: Write kube-apiserver manifest
  template:
    src: manifests/kube-apiserver.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
  notify: Master | Restart apiserver
  tags:
    - kube-apiserver

- meta: flush_handlers

- name: Write kube-scheduler kubeconfig
  template:
    src: kube-scheduler-kubeconfig.yaml.j2
    dest: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
  tags:
    - kube-scheduler

- name: Write kube-scheduler manifest
  template:
    src: manifests/kube-scheduler.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
  notify: Master | Restart kube-scheduler
  tags:
    - kube-scheduler

- name: Write kube-controller-manager kubeconfig
  template:
    src: kube-controller-manager-kubeconfig.yaml.j2
    dest: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
  tags:
    - kube-controller-manager

- name: Write kube-controller-manager manifest
  template:
    src: manifests/kube-controller-manager.manifest.j2
    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
  notify: Master | Restart kube-controller-manager
  tags:
    - kube-controller-manager

- meta: flush_handlers

View file

@@ -12,4 +12,3 @@
     dest: "{{ kube_users_dir }}/known_users.csv"
     mode: 0640
     backup: yes
-  notify: Master | set secret_changed

View file

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-controller-manager
  user:
    client-certificate: {{ kube_cert_dir }}/kube-controller-manager.pem
    client-key: {{ kube_cert_dir }}/kube-controller-manager-key.pem
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: kube-controller-manager-{{ cluster_name }}
current-context: kube-controller-manager-{{ cluster_name }}

View file

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-scheduler
  user:
    client-certificate: {{ kube_cert_dir }}/kube-scheduler.pem
    client-key: {{ kube_cert_dir }}/kube-scheduler-key.pem
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler-{{ cluster_name }}
current-context: kube-scheduler-{{ cluster_name }}

View file

@@ -174,7 +174,7 @@ apiServerCertSANs:
 {% for san in apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 {% if kube_override_hostname|default('') %}

View file

@@ -192,7 +192,7 @@ apiServerCertSANs:
 {% for san in apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 nodeRegistration:

View file

@@ -47,7 +47,7 @@ apiServerCertSANs:
 {% for san in apiserver_sans.split(' ') | unique %}
   - {{ san }}
 {% endfor %}
-certificatesDir: {{ kube_config_dir }}/ssl
+certificatesDir: {{ kube_cert_dir }}
 imageRepository: {{ kube_image_repo }}
 unifiedControlPlaneImage: ""
 apiServerExtraArgs:

View file

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
current-context: kubectl-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
    certificate-authority-data: {{ kube_node_cert|b64encode }}
    server: {{ kube_apiserver_endpoint }}
  name: {{ cluster_name }}
contexts:
- context:
    cluster: {{ cluster_name }}
    user: kubectl
  name: kubectl-to-{{ cluster_name }}
users:
- name: kubectl
  user:
    token: {{ kubectl_token }}

View file

@@ -1,237 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
  labels:
    k8s-app: kube-apiserver
    kubespray: v2
  annotations:
    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
    kubespray.apiserver-cert/serial: "{{ apiserver_cert_serial }}"
spec:
  hostNetwork: true
{% if kube_version is version('v1.6', '>=') %}
  dnsPolicy: ClusterFirst
{% endif %}
{% if kube_version is version('v1.11.1', '>=') %}
  priorityClassName: system-node-critical
{% endif %}
  containers:
  - name: kube-apiserver
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    imagePullPolicy: {{ k8s_image_pull_policy }}
    resources:
      limits:
        cpu: {{ kube_apiserver_cpu_limit }}
        memory: {{ kube_apiserver_memory_limit }}
      requests:
        cpu: {{ kube_apiserver_cpu_requests }}
        memory: {{ kube_apiserver_memory_requests }}
    command:
    - /hyperkube
    - apiserver
{% if kubernetes_audit %}
    - --audit-log-path={{ audit_log_path }}
    - --audit-log-maxage={{ audit_log_maxage }}
    - --audit-log-maxbackup={{ audit_log_maxbackups }}
    - --audit-log-maxsize={{ audit_log_maxsize }}
    - --audit-policy-file={{ audit_policy_file }}
{% endif %}
    - --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
    - --etcd-servers={{ etcd_access_addresses }}
{% if etcd_events_cluster_enabled %}
    - --etcd-servers-overrides=/events#{{ etcd_events_access_addresses_semicolon }}
{% endif %}
{% if kube_version is version('v1.9', '<') %}
    - --etcd-quorum-read=true
{% endif %}
    - --etcd-cafile={{ etcd_cert_dir }}/ca.pem
    - --etcd-certfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem
    - --etcd-keyfile={{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem
{% if kube_apiserver_insecure_port|string != "0" %}
    - --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
{% endif %}
    - --bind-address={{ kube_apiserver_bind_address }}
    - --apiserver-count={{ kube_apiserver_count }}
{% if kube_version is version('v1.9', '>=') %}
    - --endpoint-reconciler-type=lease
{% endif %}
{% if kube_version is version('v1.10', '<') %}
    - --admission-control={{ kube_apiserver_admission_control | join(',') }}
{% else %}
{% if kube_apiserver_enable_admission_plugins|length > 0 %}
    - --enable-admission-plugins={{ kube_apiserver_enable_admission_plugins | join(',') }}
{% endif %}
{% if kube_apiserver_disable_admission_plugins|length > 0 %}
    - --disable-admission-plugins={{ kube_apiserver_disable_admission_plugins | join(',') }}
{% endif %}
{% endif %}
    - --service-cluster-ip-range={{ kube_service_addresses }}
    - --service-node-port-range={{ kube_apiserver_node_port_range }}
    - --client-ca-file={{ kube_cert_dir }}/ca.pem
    - --profiling={{ kube_profiling }}
    - --repair-malformed-updates=false
    - --kubelet-client-certificate={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem
    - --kubelet-client-key={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem
    - --service-account-lookup=true
    - --kubelet-preferred-address-types={{ kubelet_preferred_address_types }}
    - --request-timeout={{ kube_apiserver_request_timeout }}
{% if kube_basic_auth|default(true) %}
    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
{% endif %}
    - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
    - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
{% if kube_token_auth|default(true) %}
    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
{% endif %}
    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
    - --oidc-issuer-url={{ kube_oidc_url }}
    - --oidc-client-id={{ kube_oidc_client_id }}
{% if kube_oidc_ca_file is defined %}
    - --oidc-ca-file={{ kube_oidc_ca_file }}
{% endif %}
{% if kube_oidc_username_claim is defined %}
    - --oidc-username-claim={{ kube_oidc_username_claim }}
{% endif %}
{% if kube_oidc_username_prefix is defined %}
    - "--oidc-username-prefix={{ kube_oidc_username_prefix }}"
{% endif %}
{% if kube_oidc_groups_claim is defined %}
    - --oidc-groups-claim={{ kube_oidc_groups_claim }}
{% endif %}
{% if kube_oidc_groups_prefix is defined %}
    - "--oidc-groups-prefix={{ kube_oidc_groups_prefix }}"
{% endif %}
{% endif %}
    - --secure-port={{ kube_apiserver_port }}
    - --insecure-port={{ kube_apiserver_insecure_port }}
    - --storage-backend={{ kube_apiserver_storage_backend }}
{% if kube_api_runtime_config is defined %}
{% for conf in kube_api_runtime_config %}
    - --runtime-config={{ conf }}
{% endfor %}
{% endif %}
{% if enable_network_policy %}
{% if kube_version is version('v1.8', '<') %}
    - --runtime-config=extensions/v1beta1/networkpolicies=true
{% endif %}
{% endif %}
    - --v={{ kube_log_level }}
    - --allow-privileged=true
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
    - --cloud-provider={{ cloud_provider }}
    - --cloud-config={{ kube_config_dir }}/cloud_config
{% endif %}
{% if kube_api_anonymous_auth is defined and kube_version is version('v1.5', '>=') %}
    - --anonymous-auth={{ kube_api_anonymous_auth }}
{% endif %}
{% if authorization_modes %}
    - --authorization-mode={{ authorization_modes|join(',') }}
{% endif %}
{% if kube_encrypt_secret_data %}
    - --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml
{% endif %}
{% if kube_feature_gates %}
    - --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %}
{% if kube_version is version('v1.9', '>=') %}
    - --requestheader-client-ca-file={{ kube_cert_dir }}/{{ kube_front_proxy_ca }}
{# FIXME(mattymo): Vault certs do not work with front-proxy-client #}
{% if cert_management == "vault" %}
    - --requestheader-allowed-names=
{% else %}
    - --requestheader-allowed-names=front-proxy-client
{% endif %}
    - --requestheader-extra-headers-prefix=X-Remote-Extra-
    - --requestheader-group-headers=X-Remote-Group
    - --requestheader-username-headers=X-Remote-User
    - --enable-aggregator-routing={{ kube_api_aggregator_routing }}
    - --proxy-client-cert-file={{ kube_cert_dir }}/front-proxy-client.pem
    - --proxy-client-key-file={{ kube_cert_dir }}/front-proxy-client-key.pem
{% else %}
    - --proxy-client-cert-file={{ kube_cert_dir }}/apiserver.pem
    - --proxy-client-key-file={{ kube_cert_dir }}/apiserver-key.pem
{% endif %}
{% if apiserver_custom_flags is string %}
    - {{ apiserver_custom_flags }}
{% else %}
{% for flag in apiserver_custom_flags %}
    - {{ flag }}
{% endfor %}
{% endif %}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
{% if kube_apiserver_insecure_port|int == 0 %}
        port: {{ kube_apiserver_port }}
        scheme: HTTPS
{% else %}
        port: {{ kube_apiserver_insecure_port }}
{% endif %}
      failureThreshold: 8
      initialDelaySeconds: 15
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 15
    volumeMounts:
    - mountPath: {{ kube_config_dir }}
      name: kubernetes-config
      readOnly: true
    - mountPath: /etc/ssl
      name: ssl-certs-host
      readOnly: true
{% for dir in ssl_ca_dirs %}
    - mountPath: {{ dir }}
      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
      readOnly: true
{% endfor %}
    - mountPath: {{ etcd_cert_dir }}
      name: etcd-certs
      readOnly: true
{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
    - mountPath: /etc/ssl/certs/ca-bundle.crt
      name: rhel-ca-bundle
      readOnly: true
{% endif %}
{% if kubernetes_audit %}
{% if audit_log_path != "-" %}
    - mountPath: {{ audit_log_mountpath }}
      name: {{ audit_log_name }}
      Writable: true
{% endif %}
    - mountPath: {{ audit_policy_mountpath }}
      name: {{ audit_policy_name }}
{% endif %}
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
    name: kubernetes-config
  - name: ssl-certs-host
    hostPath:
      path: /etc/ssl
{% for dir in ssl_ca_dirs %}
  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
    hostPath:
      path: {{ dir }}
{% endfor %}
  - hostPath:
      path: {{ etcd_cert_dir }}
    name: etcd-certs
{% if cloud_provider is defined and cloud_provider == 'aws' and ansible_os_family == 'RedHat' %}
  - hostPath:
      path: /etc/ssl/certs/ca-bundle.crt
    name: rhel-ca-bundle
{% endif %}
{% if kubernetes_audit %}
{% if audit_log_path != "-" %}
  - hostPath:
      path: {{ audit_log_hostpath }}
    name: {{ audit_log_name }}
{% endif %}
  - hostPath:
      path: {{ audit_policy_hostpath }}
    name: {{ audit_policy_name }}
{% endif %}

View file

@@ -1,132 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
  labels:
    k8s-app: kube-controller-manager
  annotations:
    kubespray.etcd-cert/serial: "{{ etcd_client_cert_serial }}"
    kubespray.controller-manager-cert/serial: "{{ controller_manager_cert_serial }}"
spec:
  hostNetwork: true
{% if kube_version is version('v1.6', '>=') %}
  dnsPolicy: ClusterFirst
{% endif %}
{% if kube_version is version('v1.11.1', '>=') %}
  priorityClassName: system-node-critical
{% endif %}
  containers:
  - name: kube-controller-manager
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    imagePullPolicy: {{ k8s_image_pull_policy }}
    resources:
      limits:
        cpu: {{ kube_controller_cpu_limit }}
        memory: {{ kube_controller_memory_limit }}
      requests:
        cpu: {{ kube_controller_cpu_requests }}
        memory: {{ kube_controller_memory_requests }}
    command:
    - /hyperkube
    - controller-manager
    - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
    - --leader-elect=true
    - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
    - --root-ca-file={{ kube_cert_dir }}/ca.pem
    - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
    - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem
    - --enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}
    - --node-monitor-grace-period={{ kube_controller_node_monitor_grace_period }}
    - --node-monitor-period={{ kube_controller_node_monitor_period }}
    - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
    - --profiling={{ kube_profiling }}
    - --terminated-pod-gc-threshold={{ kube_controller_terminated_pod_gc_threshold }}
    - --v={{ kube_log_level }}
{% if rbac_enabled %}
    - --use-service-account-credentials=true
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
    - --cloud-provider={{cloud_provider}}
    - --cloud-config={{ kube_config_dir }}/cloud_config
{% elif cloud_provider is defined and cloud_provider in ["external", "oci"] %}
    - --cloud-provider=external
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
    - --configure-cloud-routes=true
{% else %}
    - --configure-cloud-routes=false
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel", "canal", "cilium", "kube-router"] %}
    - --allocate-node-cidrs=true
    - --cluster-cidr={{ kube_pods_subnet }}
    - --service-cluster-ip-range={{ kube_service_addresses }}
    - --node-cidr-mask-size={{ kube_network_node_prefix }}
{% endif %}
{% if kube_feature_gates %}
    - --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %}
{% if controller_mgr_custom_flags is string %}
    - {{ controller_mgr_custom_flags }}
{% else %}
{% for flag in controller_mgr_custom_flags %}
    - {{ flag }}
{% endfor %}
{% endif %}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
      initialDelaySeconds: 30
      timeoutSeconds: 10
    volumeMounts:
    - mountPath: /etc/ssl
      name: ssl-certs-host
      readOnly: true
{% for dir in ssl_ca_dirs %}
    - mountPath: {{ dir }}
      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
      readOnly: true
{% endfor %}
    - mountPath: "{{kube_config_dir}}/ssl"
      name: etc-kube-ssl
      readOnly: true
    - mountPath: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
      name: kubeconfig
      readOnly: true
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
    - mountPath: "{{ kube_config_dir }}/cloud_config"
      name: cloudconfig
      readOnly: true
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
    - mountPath: "{{ kube_config_dir }}/openstack-cacert.pem"
      name: openstackcacert
      readOnly: true
{% endif %}
  volumes:
  - name: ssl-certs-host
    hostPath:
      path: /etc/ssl
{% for dir in ssl_ca_dirs %}
  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
    hostPath:
      path: {{ dir }}
{% endfor %}
  - name: etc-kube-ssl
    hostPath:
      path: "{{ kube_config_dir }}/ssl"
  - name: kubeconfig
    hostPath:
      path: "{{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere", "aws"] %}
  - hostPath:
      path: "{{ kube_config_dir }}/cloud_config"
    name: cloudconfig
{% endif %}
{% if cloud_provider is defined and cloud_provider in ["openstack"] and openstack_cacert is defined and openstack_cacert != "" %}
  - hostPath:
      path: "{{ kube_config_dir }}/openstack-cacert.pem"
    name: openstackcacert
{% endif %}

View file

@@ -1,82 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
  labels:
    k8s-app: kube-scheduler
  annotations:
    kubespray.scheduler-cert/serial: "{{ scheduler_cert_serial }}"
spec:
  hostNetwork: true
{% if kube_version is version('v1.6', '>=') %}
  dnsPolicy: ClusterFirst
{% endif %}
{% if kube_version is version('v1.11.1', '>=') %}
  priorityClassName: system-node-critical
{% endif %}
  containers:
  - name: kube-scheduler
    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
    imagePullPolicy: {{ k8s_image_pull_policy }}
    resources:
      limits:
        cpu: {{ kube_scheduler_cpu_limit }}
        memory: {{ kube_scheduler_memory_limit }}
      requests:
        cpu: {{ kube_scheduler_cpu_requests }}
        memory: {{ kube_scheduler_memory_requests }}
    command:
    - /hyperkube
    - scheduler
    - --leader-elect=true
    - --kubeconfig={{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml
    - --profiling={{ kube_profiling }}
    - --v={{ kube_log_level }}
{% if kube_feature_gates %}
    - --feature-gates={{ kube_feature_gates|join(',') }}
{% endif %}
{% if scheduler_custom_flags is string %}
    - {{ scheduler_custom_flags }}
{% else %}
{% for flag in scheduler_custom_flags %}
    - {{ flag }}
{% endfor %}
{% endif %}
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
      initialDelaySeconds: 30
      timeoutSeconds: 10
    volumeMounts:
    - mountPath: /etc/ssl
      name: ssl-certs-host
      readOnly: true
{% for dir in ssl_ca_dirs %}
    - mountPath: {{ dir }}
      name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
      readOnly: true
{% endfor %}
    - mountPath: "{{ kube_config_dir }}/ssl"
      name: etc-kube-ssl
      readOnly: true
    - mountPath: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"
      name: kubeconfig
      readOnly: true
  volumes:
  - name: ssl-certs-host
    hostPath:
      path: /etc/ssl
{% for dir in ssl_ca_dirs %}
  - name: {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }}
    hostPath:
      path: {{ dir }}
{% endfor %}
  - name: etc-kube-ssl
    hostPath:
      path: "{{ kube_config_dir }}/ssl"
  - name: kubeconfig
    hostPath:
      path: "{{ kube_config_dir }}/kube-scheduler-kubeconfig.yaml"

View file

@@ -1,7 +1,4 @@
 ---
-# Valid options: docker (default), rkt, or host
-kubelet_deployment_type: host
-
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
@@ -90,12 +87,6 @@ kubelet_custom_flags: []
 ## Support custom flags to be passed to kubelet only on nodes, not masters
 kubelet_node_custom_flags: []

-# This setting is used for rkt based kubelet for deploying hyperkube
-# from a docker based registry ( controls --insecure and docker:// )
-## Empty value for quay.io containers
-## docker for docker registry containers
-kube_hyperkube_image_repo: ""
-
 # If non-empty, will use this string as identification instead of the actual hostname
 kube_override_hostname: >-
 {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}

View file

@@ -1,6 +0,0 @@
---
dependencies:
  - role: kubernetes/secrets
    when: not kubeadm_enabled
    tags:
      - k8s-secrets

View file

@@ -1,11 +1,4 @@
 ---
-- name: Set kubelet deployment to host if kubeadm is enabled
-  set_fact:
-    kubelet_deployment_type: host
-  when: kubeadm_enabled
-  tags:
-    - kubeadm
-
 - name: install | Copy kubeadm binary from download dir
   synchronize:
     src: "{{ local_release_dir }}/kubeadm"
@@ -15,7 +8,6 @@
     owner: no
     group: no
   delegate_to: "{{ inventory_hostname }}"
-  when: kubeadm_enabled
   tags:
     - kubeadm
@@ -24,15 +16,41 @@
     path: "{{ bin_dir }}/kubeadm"
     mode: "0755"
     state: file
-  when: kubeadm_enabled
   tags:
     - kubeadm

-- include_tasks: "install_{{ kubelet_deployment_type }}.yml"
+- name: install | Copy kubelet binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
+  tags:
+    - hyperkube
+    - upgrade
+  notify: restart kubelet
+
+- name: install | Set kubelet binary permissions
+  file:
+    path: "{{ bin_dir }}/kubelet"
+    mode: "0755"
+    state: file
+  tags:
+    - hyperkube
+    - upgrade
+
+- name: install | Copy socat wrapper for Container Linux
+  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
+  args:
+    creates: "{{ bin_dir }}/socat"
+  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']

 - name: install | Write kubelet systemd init file
   template:
-    src: "kubelet.{{ kubelet_deployment_type }}.service.j2"
+    src: "kubelet.host.service.j2"
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
   notify: restart kubelet

View file

@@ -1,9 +0,0 @@
---
- name: install | Install kubelet launch script
  template:
    src: kubelet-container.j2
    dest: "{{ bin_dir }}/kubelet"
    owner: kube
    mode: 0755
    backup: yes
  notify: restart kubelet

View file

@@ -1,30 +0,0 @@
---
- name: install | Copy kubelet binary from download dir
  synchronize:
    src: "{{ local_release_dir }}/hyperkube"
    dest: "{{ bin_dir }}/kubelet"
    compress: no
    perms: yes
    owner: no
    group: no
  delegate_to: "{{ inventory_hostname }}"
  tags:
    - hyperkube
    - upgrade
  notify: restart kubelet

- name: install | Set kubelet binary permissions
  file:
    path: "{{ bin_dir }}/kubelet"
    mode: "0755"
    state: file
  tags:
    - hyperkube
    - upgrade

- name: install | Copy socat wrapper for Container Linux
  command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"
  args:
    creates: "{{ bin_dir }}/socat"
  when: ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']

View file

@@ -1,32 +0,0 @@
---
- name: Trust kubelet container
  command: >-
    /usr/bin/rkt trust
    --skip-fingerprint-review
    --root
    {{ item }}
  register: kubelet_rkt_trust_result
  until: kubelet_rkt_trust_result.rc == 0
  with_items:
    - "https://quay.io/aci-signing-key"
    - "https://coreos.com/dist/pubkeys/aci-pubkeys.gpg"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  changed_when: false

- name: create kubelet working directory
  file:
    state: directory
    path: /var/lib/kubelet

- name: Create kubelet service systemd directory
  file:
    path: /etc/systemd/system/kubelet.service.d
    state: directory

- name: Write kubelet proxy drop-in
  template:
    src: http-proxy.conf.j2
    dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf
  when: http_proxy is defined or https_proxy is defined
  notify: restart kubelet

View file

@@ -22,16 +22,6 @@
   tags:
     - nginx

-- name: Write kubelet config file (non-kubeadm)
-  template:
-    src: kubelet.standard.env.j2
-    dest: "{{ kube_config_dir }}/kubelet.env"
-    backup: yes
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Make sure dynamic kubelet configuration directory is writeable
   file:
     path: "{{ dynamic_kubelet_configuration_dir }}"
@@ -44,25 +34,11 @@
     src: kubelet.kubeadm.env.j2
     dest: "{{ kube_config_dir }}/kubelet.env"
     backup: yes
-  when: kubeadm_enabled
   notify: restart kubelet
   tags:
     - kubelet
     - kubeadm

-- name: write the kubecfg (auth) file for kubelet
-  template:
-    src: "{{ item }}-kubeconfig.yaml.j2"
-    dest: "{{ kube_config_dir }}/{{ item }}-kubeconfig.yaml"
-    backup: yes
-  with_items:
-    - node
-    - kube-proxy
-  when: not kubeadm_enabled
-  notify: restart kubelet
-  tags:
-    - kubelet
-
 - name: Ensure nodePort range is reserved
   sysctl:
     name: net.ipv4.ip_local_reserved_ports
@@ -142,26 +118,17 @@
   tags:
     - kube-proxy

-- name: Write proxy manifest
-  template:
-    src: manifests/kube-proxy.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
-  when:
-    - not (kubeadm_enabled or kube_proxy_remove)
-  tags:
-    - kube-proxy
-
 - name: Purge proxy manifest for kubeadm or if proxy services being provided by other means, e.g. network_plugin
   file:
     path: "{{ kube_manifest_dir }}/kube-proxy.manifest"
     state: absent
   when:
-    - kubeadm_enabled or kube_proxy_remove
+    - kube_proxy_remove
   tags:
     - kube-proxy

 - name: Cleanup kube-proxy leftovers from node
-  command: "{{ docker_bin_dir }}/docker run --rm --privileged -v /lib/modules:/lib/modules --net=host {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} kube-proxy --cleanup"
+  command: "{{ local_release_dir }}/hyperkube kube-proxy --cleanup"
   when:
     - kube_proxy_remove
 # `kube-proxy --cleanup`, being Ok as per shown WARNING, still returns 255 from above run (?)

View file

@@ -16,7 +16,7 @@
   service:
     name: kubelet
     state: stopped
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
+  when: kubelet_container_check.rc == 0

 - name: "Pre-upgrade | ensure kubelet container is removed if using host deployment"
   command: docker rm -fv kubelet
@@ -26,4 +26,4 @@
   retries: 4
   until: remove_kubelet_container.rc == 0
   delay: 5
-  when: kubelet_deployment_type == 'host' and kubelet_container_check.rc == 0
+  when: kubelet_container_check.rc == 0

View file

@@ -1,18 +0,0 @@
apiVersion: v1
kind: Config
clusters:
- name: local
  cluster:
    certificate-authority: {{ kube_cert_dir }}/ca.pem
    server: {{ kube_apiserver_endpoint }}
users:
- name: kube-proxy
  user:
    client-certificate: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}.pem
    client-key: {{ kube_cert_dir }}/kube-proxy-{{ inventory_hostname }}-key.pem
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: kube-proxy-{{ cluster_name }}
current-context: kube-proxy-{{ cluster_name }}

View file

@@ -1,43 +0,0 @@
#!/bin/bash
{{ docker_bin_dir }}/docker run \
  --net=host \
  --pid=host \
  --privileged \
  --name=kubelet \
  --restart=on-failure:5 \
  --memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
  --cpu-shares={{ kube_cpu_reserved|regex_replace('m', '') }} \
  -v /dev:/dev:rw \
  -v /etc/cni:/etc/cni:ro \
  -v /opt/cni:/opt/cni:ro \
  -v /etc/ssl:/etc/ssl:ro \
  -v /etc/resolv.conf:/etc/resolv.conf \
  {% for dir in ssl_ca_dirs -%}
  -v {{ dir }}:{{ dir }}:ro \
  {% endfor -%}
  {% if kubelet_load_modules -%}
  -v /lib/modules:/lib/modules:ro \
  {% endif -%}
  -v /sys:/sys:ro \
  -v {{ docker_daemon_graph }}:{{ docker_daemon_graph }}:rw \
  -v /var/log:/var/log:rw \
  -v /var/lib/kubelet:/var/lib/kubelet:shared \
  -v /var/lib/calico:/var/lib/calico:shared \
  -v /var/lib/cni:/var/lib/cni:shared \
  -v /var/run:/var/run:rw \
  {# we can run into issues with double mounting /var/lib/kubelet #}
  {# surely there's a better way to do this #}
  {% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
  -v {{ kubelet_flexvolumes_plugins_dir }}:{{ kubelet_flexvolumes_plugins_dir }}:rw \
  {% endif -%}
  {% if local_volume_provisioner_enabled -%}
  {% for class in local_volume_provisioner_storage_classes -%}
  -v {{ class.host_dir }}:{{ class.host_dir }}:rw \
  -v {{ class.mount_dir }}:{{ class.mount_dir }}:rw \
  {% endfor -%}
  {% endif %}
  -v {{kube_config_dir}}:{{kube_config_dir}}:ro \
  -v /etc/os-release:/etc/os-release:ro \
  {{ hyperkube_image_repo }}:{{ hyperkube_image_tag}} \
  ./hyperkube kubelet \
  "$@"

View file

@@ -1,31 +0,0 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Wants=docker.socket

[Service]
User=root
EnvironmentFile={{kube_config_dir}}/kubelet.env
ExecStart={{ bin_dir }}/kubelet \
    $KUBE_LOGTOSTDERR \
    $KUBE_LOG_LEVEL \
    $KUBELET_API_SERVER \
    $KUBELET_ADDRESS \
    $KUBELET_PORT \
    $KUBELET_HOSTNAME \
    $KUBE_ALLOW_PRIV \
    $KUBELET_ARGS \
    $DOCKER_SOCKET \
    $KUBELET_NETWORK_PLUGIN \
    $KUBELET_VOLUME_PLUGIN \
    $KUBELET_CLOUDPROVIDER
Restart=always
RestartSec=10s
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f kubelet
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
ExecReload={{ docker_bin_dir }}/docker restart kubelet

[Install]
WantedBy=multi-user.target

View file

@ -1,120 +0,0 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Wants=network.target
[Service]
User=root
Restart=on-failure
RestartSec=10s
TimeoutStartSec=0
LimitNOFILE=40000
ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/run/kubelet.uuid
ExecStartPre=-/bin/mkdir -p /var/lib/kubelet
ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
EnvironmentFile={{kube_config_dir}}/kubelet.env
# stage1-fly mounts /proc /sys /dev so no need to duplicate the mounts
ExecStart=/usr/bin/rkt run \
{% if kubelet_load_modules == true %}
--volume lib-modules,kind=host,source=/lib/modules \
{% endif %}
--volume os-release,kind=host,source=/etc/os-release,readOnly=true \
--volume hosts,kind=host,source=/etc/hosts,readOnly=true \
--volume dns,kind=host,source=/etc/resolv.conf \
--volume etc-kubernetes,kind=host,source={{ kube_config_dir }},readOnly=false \
--volume etc-ssl-certs,kind=host,source=/etc/ssl/certs,readOnly=true \
--volume etcd-ssl,kind=host,source={{ etcd_config_dir }},readOnly=true \
--volume run,kind=host,source=/run,readOnly=false \
{% for dir in ssl_ca_dirs -%}
--volume {{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},kind=host,source={{ dir }},readOnly=true \
{% endfor -%}
--volume var-lib-docker,kind=host,source={{ docker_daemon_graph }},readOnly=false \
--volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
--volume var-log,kind=host,source=/var/log \
{% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-router"] %}
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
--volume opt-cni,kind=host,source=/opt/cni,readOnly=true \
--volume var-lib-cni,kind=host,source=/var/lib/cni,readOnly=false \
{% endif %}
{% if kube_network_plugin in ["calico", "canal"] %}
--volume var-lib-calico,kind=host,source=/var/lib/calico,readOnly=false \
{% endif %}
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--volume flexvolumes,kind=host,source={{ kubelet_flexvolumes_plugins_dir }},readOnly=false \
{% endif -%}
{% if local_volume_provisioner_enabled %}
{% for class in local_volume_provisioner_storage_classes %}
--volume local-volume-provisioner-base-dir,kind=host,source={{ class.host_dir }},readOnly=false \
{# Not pretty, but needed to avoid double mount #}
{% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
--volume local-volume-provisioner-mount-dir,kind=host,source={{ class.mount_dir }},readOnly=false \
{% endif %}
{% endfor %}
{% endif %}
{% if kubelet_load_modules == true %}
--mount volume=lib-modules,target=/lib/modules \
{% endif %}
--mount volume=etc-cni,target=/etc/cni \
--mount volume=opt-cni,target=/opt/cni \
--mount volume=var-lib-cni,target=/var/lib/cni \
{% if kube_network_plugin in ["calico", "canal"] %}
--mount volume=var-lib-calico,target=/var/lib/calico \
{% endif %}
--mount volume=os-release,target=/etc/os-release \
--mount volume=dns,target=/etc/resolv.conf \
--mount volume=etc-kubernetes,target={{ kube_config_dir }} \
--mount volume=etc-ssl-certs,target=/etc/ssl/certs \
--mount volume=etcd-ssl,target={{ etcd_config_dir }} \
--mount volume=run,target=/run \
{% for dir in ssl_ca_dirs -%}
--mount volume={{ dir | regex_replace('^/(.*)$', '\\1' ) | regex_replace('/', '-') }},target={{ dir }} \
{% endfor -%}
--mount volume=var-lib-docker,target=/var/lib/docker \
--mount volume=var-lib-kubelet,target=/var/lib/kubelet \
--mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \
{# we can run into issues with double mounting /var/lib/kubelet #}
{# surely there's a better way to do this #}
{% if '/var/lib/kubelet' not in kubelet_flexvolumes_plugins_dir %}
--mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \
{% endif -%}
{% if local_volume_provisioner_enabled %}
{% for class in local_volume_provisioner_storage_classes %}
--mount volume=local-volume-provisioner-base-dir,target={{ class.host_dir }} \
{# Not pretty, but needed to avoid double mount #}
{% if class.host_dir not in class.mount_dir and class.mount_dir not in class.host_dir %}
--mount volume=local-volume-provisioner-mount-dir,target={{ class.mount_dir }} \
{% endif %}
{% endfor %}
{% endif %}
--stage1-from-dir=stage1-fly.aci \
{% if kube_hyperkube_image_repo == "docker" %}
--insecure-options=image \
docker://{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
{% else %}
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
{% endif %}
--uuid-file-save=/var/run/kubelet.uuid \
--debug --exec=/kubelet -- \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_ARGS \
$DOCKER_SOCKET \
$KUBELET_REGISTER_NODE \
$KUBELET_NETWORK_PLUGIN \
$KUBELET_VOLUME_PLUGIN \
$KUBELET_CLOUDPROVIDER
ExecStop=-/usr/bin/rkt stop --uuid-file=/var/run/kubelet.uuid
[Install]
WantedBy=multi-user.target
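The unit above relies on rkt's rule that every --mount volume=NAME must refer to a --volume NAME,kind=host defined in the same invocation. A minimal hand-run sketch of the same pattern with stage1-fly; the image tag and the single mount are illustrative, not the full rendered unit:

    /usr/bin/rkt run \
      --volume etc-kubernetes,kind=host,source=/etc/kubernetes,readOnly=false \
      --mount volume=etc-kubernetes,target=/etc/kubernetes \
      --stage1-from-dir=stage1-fly.aci \
      --insecure-options=image \
      docker://k8s.gcr.io/hyperkube:v1.12.3 \
      --uuid-file-save=/var/run/kubelet.uuid \
      --exec=/kubelet -- --version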

View file

@ -1,151 +0,0 @@
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v={{ kube_log_level }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address={{ kubelet_bind_address }} --node-ip={{ kubelet_address }}"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
{% if kube_override_hostname|default('') %}
# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
{# Base kubelet args #}
{% set kubelet_args_base %}
--pod-manifest-path={{ kube_manifest_dir }} \
{% if kube_version is version('v1.12.0', '<') %}
--cadvisor-port={{ kube_cadvisor_port }} \
{% endif %}
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
{% if container_manager == 'docker' and kube_version is version('v1.12.0', '<') %}
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
{% endif %}
--client-ca-file={{ kube_cert_dir }}/ca.pem \
--tls-cert-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}.pem \
--tls-private-key-file={{ kube_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
--anonymous-auth=false \
--read-only-port={{ kube_read_only_port }} \
{% if kube_version is version('v1.6', '>=') %}
{# flag got removed with 1.7.0 #}
{% if kube_version is version('v1.7', '<') %}
--enable-cri={{ kubelet_enable_cri }} \
{% endif %}
{% if container_manager == 'crio' %}
--container-runtime=remote \
--container-runtime-endpoint=/var/run/crio/crio.sock \
{% endif %}
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
--cgroups-per-qos={{ kubelet_cgroups_per_qos }} \
--max-pods={{ kubelet_max_pods }} \
{% if kube_version is version('v1.8', '<') %}
--experimental-fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% else %}
--fail-swap-on={{ kubelet_fail_swap_on|default(true)}} \
{% endif %}
{% if kubelet_authentication_token_webhook %}
--authentication-token-webhook \
{% endif %}
{% if kubelet_authorization_mode_webhook %}
--authorization-mode=Webhook \
{% endif %}
{% if ansible_architecture == "aarch64" and ansible_os_family == "RedHat" %}
--cgroup-driver=systemd \
{% endif %}
--enforce-node-allocatable={{ kubelet_enforce_node_allocatable }} {% endif %}{% endset %}
{# DNS settings for kubelet #}
{% if dns_mode in ['kubedns', 'coredns'] %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }}{% endset %}
{% elif dns_mode == 'coredns_dual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ skydns_server }},{{ skydns_server_secondary }}{% endset %}
{% elif dns_mode == 'dnsmasq_kubedns' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ dnsmasq_dns_server }}{% endset %}
{% elif dns_mode == 'manual' %}
{% set kubelet_args_cluster_dns %}--cluster-dns={{ manual_dns_server }}{% endset %}
{% else %}
{% set kubelet_args_cluster_dns %}{% endset %}
{% endif %}
{% set kubelet_args_dns %}{{ kubelet_args_cluster_dns }} --cluster-domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }}{% endset %}
{# Location of the apiserver #}
{% if kube_version is version('v1.8', '<') %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig{% endset %}
{% else %}
{% set kubelet_args_kubeconfig %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml{% endset %}
{% endif %}
{% set role_node_taints = [] %}
{% if standalone_kubelet|bool %}
{# We are on a master-only host. Make the master unschedulable in this case. #}
{% if kube_version is version('v1.6', '>=') %}
{# Set taints on the master so that it's unschedulable by default. Use node-role.kubernetes.io/master taint like kubeadm. #}
{% set dummy = role_node_taints.append('node-role.kubernetes.io/master=:NoSchedule') %}
{% else %}
{# --register-with-taints was added in 1.6 so just register unschedulable if Kubernetes < 1.6 #}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-schedulable=false{% endset %}
{% endif %}
{% endif %}
{% set all_node_taints = node_taints|default([]) + role_node_taints %}
{# Node reserved CPU/memory #}
{% if is_kube_master|bool %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_master_cpu_reserved }},memory={{ kube_master_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% else %}
{% set kube_reserved %}--kube-reserved cpu={{ kube_cpu_reserved }},memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }}{% endset %}
{% endif %}
{# Kubelet node labels #}
{% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %}
{% set dummy = role_node_labels.append("node-role.kubernetes.io/master=''") %}
{% if not standalone_kubelet|bool %}
{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
{% endif %}
{% else %}
{% set dummy = role_node_labels.append("node-role.kubernetes.io/node=''") %}
{% endif %}
{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
{% if inventory_hostname in nvidia_gpu_nodes %}
{% set dummy = role_node_labels.append('nvidia.com/gpu=true') %}
{% endif %}
{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined and node_labels is mapping %}
{% for labelname, labelvalue in node_labels.items() %}
{% set dummy = inventory_node_labels.append('%s=%s'|format(labelname, labelvalue)) %}
{% endfor %}
{% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %}
{# Kubelet node taints for gpu #}
{% if nvidia_gpu_nodes is defined and nvidia_accelerator_enabled|bool %}
{% if inventory_hostname in nvidia_gpu_nodes %}
{% set kubelet_args_kubeconfig %}{{ kubelet_args_kubeconfig }} --register-with-taints=nvidia.com/gpu=:NoSchedule{% endset %}
{% endif %}
{% endif %}
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {% if all_node_taints %}--register-with-taints={{ all_node_taints | join(',') }} {% endif %}--node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium", "kube-router"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"
{% endif %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
{% if cloud_provider is defined and cloud_provider in ["openstack", "vsphere", "aws"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider in ["azure"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config --azure-container-registry-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider in ["oci", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider=external"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
PATH={{ bin_dir }}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
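The variables assembled in this file are expanded by the kubelet unit earlier in this diff ($KUBE_LOGTOSTDERR, $KUBELET_ARGS, and so on). A hedged way to inspect what a rendered copy actually hands to the kubelet; the kubelet.env path is an assumption for this sketch:

    # export everything the env file defines, then list selected flags
    set -a; source /etc/kubernetes/kubelet.env; set +a
    printf '%s\n' ${KUBELET_ARGS} | grep -E -- '--(node-labels|register-with-taints|cluster-dns)'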

View file

@ -1,110 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-proxy
namespace: kube-system
labels:
k8s-app: kube-proxy
annotations:
kubespray.kube-proxy-cert/serial: "{{ kube_proxy_cert_serial }}"
spec:
hostNetwork: true
{% if kube_version is version('v1.6', '>=') %}
dnsPolicy: ClusterFirst
{% endif %}
nodeSelector:
beta.kubernetes.io/os: linux
{% if kube_version is version('v1.11.1', '>=') %}
priorityClassName: system-node-critical
{% endif %}
containers:
- name: kube-proxy
image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
resources:
limits:
cpu: {{ kube_proxy_cpu_limit }}
memory: {{ kube_proxy_memory_limit }}
requests:
cpu: {{ kube_proxy_cpu_requests }}
memory: {{ kube_proxy_memory_requests }}
livenessProbe:
httpGet:
host: 127.0.0.1
path: /healthz
port: 10256
failureThreshold: 8
initialDelaySeconds: 15
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 15
command:
- /hyperkube
- proxy
- --v={{ kube_log_level }}
- --kubeconfig={{kube_config_dir}}/kube-proxy-kubeconfig.yaml
- --bind-address={{ ip | default(ansible_default_ipv4.address) }}
- --cluster-cidr={{ kube_pods_subnet }}
- --proxy-mode={{ kube_proxy_mode }}
- --oom-score-adj=-998
- --healthz-bind-address={{ kube_proxy_healthz_bind_address }}
- --resource-container=""
{% if kube_proxy_nodeport_addresses %}
- --nodeport-addresses={{ kube_proxy_nodeport_addresses_cidr }}
{% endif %}
{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
- --masquerade-all
{% elif kube_proxy_mode == 'ipvs' %}
- --masquerade-all
{% if kube_version is version('v1.10', '<') %}
- --feature-gates=SupportIPVSProxyMode=true
{% endif %}
- --ipvs-min-sync-period=5s
- --ipvs-sync-period=5s
- --ipvs-scheduler=rr
{% endif %}
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: "{{ kube_config_dir }}/ssl"
name: etc-kube-ssl
readOnly: true
- mountPath: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
name: kubeconfig
readOnly: true
- mountPath: /var/run/dbus
name: var-run-dbus
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
volumes:
- name: ssl-certs-host
hostPath:
{% if ansible_os_family == 'RedHat' %}
path: /etc/pki/tls
{% else %}
path: /usr/share/ca-certificates
{% endif %}
- name: etc-kube-ssl
hostPath:
path: "{{ kube_config_dir }}/ssl"
- name: kubeconfig
hostPath:
path: "{{ kube_config_dir }}/kube-proxy-kubeconfig.yaml"
- name: var-run-dbus
hostPath:
path: /var/run/dbus
- hostPath:
path: /lib/modules
name: lib-modules
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
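The livenessProbe above polls kube-proxy's healthz endpoint on 127.0.0.1:10256. The same check can be replayed by hand on a node when debugging probe failures:

    # -f makes curl exit non-zero unless kube-proxy answers with HTTP 200
    curl -fsS http://127.0.0.1:10256/healthz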

View file

@ -44,7 +44,6 @@
msg: "{{item.value}} isn't a bool" msg: "{{item.value}} isn't a bool"
run_once: yes run_once: yes
with_items: with_items:
- { name: kubeadm_enabled, value: "{{ kubeadm_enabled }}" }
- { name: download_run_once, value: "{{ download_run_once }}" } - { name: download_run_once, value: "{{ download_run_once }}" }
- { name: deploy_netchecker, value: "{{ deploy_netchecker }}" } - { name: deploy_netchecker, value: "{{ deploy_netchecker }}" }
- { name: download_always_pull, value: "{{ download_always_pull }}" } - { name: download_always_pull, value: "{{ download_always_pull }}" }
@ -141,6 +140,8 @@
register: calico_version_on_server register: calico_version_on_server
run_once: yes run_once: yes
delegate_to: "{{ groups['kube-master'][0] }}" delegate_to: "{{ groups['kube-master'][0] }}"
when:
- kube_network_plugin == 'calico'
- name: "Check that calico version is enough for upgrade" - name: "Check that calico version is enough for upgrade"
assert: assert:
@ -148,6 +149,7 @@
- calico_version_on_server.stdout is version('v2.6.5', '>=') - calico_version_on_server.stdout is version('v2.6.5', '>=')
msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5" msg: "Your version of calico is not fresh enough for upgrade. Minimum version v2.6.5"
when: when:
- kube_network_plugin == 'calico'
- 'calico_version_on_server.stdout is defined' - 'calico_version_on_server.stdout is defined'
- 'calico_version_on_server.stdout != ""' - 'calico_version_on_server.stdout != ""'
- inventory_hostname == groups['kube-master'][0] - inventory_hostname == groups['kube-master'][0]

View file

@ -170,7 +170,6 @@
set_fact:
kube_proxy_mode: 'ipvs'
when:
- kubeadm_enabled
- kube_proxy_remove
tags:
- facts

View file

@ -1,2 +0,0 @@
---
kube_cert_group: kube-cert

View file

@ -1,15 +0,0 @@
---
- name: set secret_changed
command: /bin/true
notify:
- set secret_changed to true
- clear kubeconfig for root user
- name: set secret_changed to true
set_fact:
secret_changed: true
- name: clear kubeconfig for root user
file:
path: /root/.kube/config
state: absent

View file

@ -1 +0,0 @@
---

View file

@ -1,82 +0,0 @@
---
- name: "Check_certs | check if the certs have already been generated on first master"
find:
paths: "{{ kube_cert_dir }}"
patterns: "*.pem"
get_checksum: true
delegate_to: "{{groups['kube-master'][0]}}"
register: kubecert_master
run_once: true
- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
set_fact:
sync_certs: false
gen_certs: false
secret_changed: false
- name: "Check_certs | Set 'gen_certs' to true"
set_fact:
gen_certs: true
when: "not item in kubecert_master.files|map(attribute='path') | list"
run_once: true
with_items: >-
['{{ kube_cert_dir }}/ca.pem',
'{{ kube_cert_dir }}/apiserver.pem',
'{{ kube_cert_dir }}/apiserver-key.pem',
'{{ kube_cert_dir }}/kube-scheduler.pem',
'{{ kube_cert_dir }}/kube-scheduler-key.pem',
'{{ kube_cert_dir }}/kube-controller-manager.pem',
'{{ kube_cert_dir }}/kube-controller-manager-key.pem',
'{{ kube_cert_dir }}/front-proxy-ca.pem',
'{{ kube_cert_dir }}/front-proxy-ca-key.pem',
'{{ kube_cert_dir }}/front-proxy-client.pem',
'{{ kube_cert_dir }}/front-proxy-client-key.pem',
'{{ kube_cert_dir }}/service-account-key.pem',
{% for host in groups['kube-master'] %}
'{{ kube_cert_dir }}/admin-{{ host }}.pem',
'{{ kube_cert_dir }}/admin-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %},
{% for host in groups['k8s-cluster'] %}
'{{ kube_cert_dir }}/node-{{ host }}.pem',
'{{ kube_cert_dir }}/node-{{ host }}-key.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}.pem',
'{{ kube_cert_dir }}/kube-proxy-{{ host }}-key.pem'
{% if not loop.last %}{{','}}{% endif %}
{% endfor %}]
- name: "Check_certs | Set 'gen_master_certs' to true"
set_fact:
gen_master_certs: |-
{%- set gen = False -%}
{% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
{% for cert in ['apiserver.pem', 'apiserver-key.pem',
'kube-scheduler.pem','kube-scheduler-key.pem',
'kube-controller-manager.pem','kube-controller-manager-key.pem',
'front-proxy-ca.pem','front-proxy-ca-key.pem',
'front-proxy-client.pem','front-proxy-client-key.pem',
'service-account-key.pem'] -%}
{% set cert_file = "%s/%s.pem"|format(kube_cert_dir, cert) %}
{% if not cert_file in existing_certs -%}
{%- set gen = True -%}
{% endif -%}
{% endfor %}
{{ gen }}
run_once: true
- name: "Check_certs | Set 'gen_node_certs' to true"
set_fact:
gen_node_certs: |-
{
{% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
{% for host in groups['k8s-cluster'] -%}
{% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %}
{% set kube_proxy_cert = "%s/kube-proxy-%s-key.pem"|format(kube_cert_dir, host) %}
{% if host_cert in existing_certs and kube_proxy_cert in existing_certs -%}
"{{ host }}": False,
{% else -%}
"{{ host }}": True,
{% endif -%}
{% endfor %}
}
run_once: true
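Both set_fact blocks above reduce to a presence check against the first master's cert directory. A hedged shell equivalent of the gen_master_certs logic; the cert directory is an assumption:

    KUBE_CERT_DIR=/etc/kubernetes/ssl
    gen_master_certs=false
    for cert in apiserver.pem apiserver-key.pem kube-scheduler.pem kube-scheduler-key.pem \
                kube-controller-manager.pem kube-controller-manager-key.pem service-account-key.pem; do
      # any missing master cert flips the flag, forcing regeneration
      [ -e "${KUBE_CERT_DIR}/${cert}" ] || gen_master_certs=true
    done
    echo "gen_master_certs=${gen_master_certs}"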

View file

@ -1,227 +0,0 @@
---
- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
file:
path: "{{ kube_config_dir }}"
state: directory
owner: kube
run_once: yes
delegate_to: "{{groups['kube-master'][0]}}"
when: gen_certs|default(false)
tags:
- kubelet
- k8s-secrets
- kube-controller-manager
- kube-apiserver
- apps
- network
- master
- node
- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
file:
path: "{{ kube_script_dir }}"
state: directory
owner: kube
run_once: yes
delegate_to: "{{groups['kube-master'][0]}}"
when: gen_certs|default(false)
tags:
- k8s-secrets
- name: Gen_certs | write masters openssl config
template:
src: "openssl-master.conf.j2"
dest: "{{ kube_config_dir }}/openssl-master.conf"
run_once: yes
delegate_to: "{{ groups['kube-master']|first }}"
when: gen_certs|default(false)
- name: Gen_certs | write nodes openssl config
template:
src: "openssl-node.conf.j2"
dest: "{{ kube_config_dir }}/{{ inventory_hostname }}-openssl.conf"
delegate_to: "{{ groups['kube-master']|first }}"
when: gen_certs|default(false) and inventory_hostname in groups['k8s-cluster']
- name: Gen_certs | copy certs generation script
template:
src: "make-ssl.sh.j2"
dest: "{{ kube_script_dir }}/make-ssl.sh"
mode: 0700
run_once: yes
delegate_to: "{{groups['kube-master'][0]}}"
when: gen_certs|default(false)
- name: Gen_certs | run master cert generation script
command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl-master.conf -d {{ kube_cert_dir }}"
environment:
- MASTERS: "{% for m in groups['kube-master'] %}
{% if gen_master_certs|default(false) %}
{{ m }}
{% endif %}
{% endfor %}"
delegate_to: "{{ groups['kube-master']|first }}"
run_once: true
when: gen_certs|default(false)
notify: set secret_changed
- name: Gen_certs | run nodes cert generation script
command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/{{ inventory_hostname }}-openssl.conf -d {{ kube_cert_dir }}"
environment:
- HOSTS: "{{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master']|first }}"
when: gen_certs|default(false) and inventory_hostname in groups['k8s-cluster']
notify: set secret_changed
- set_fact:
all_master_certs: "['ca-key.pem',
'apiserver.pem',
'apiserver-key.pem',
'kube-scheduler.pem',
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
'service-account-key.pem',
{% for node in groups['kube-master'] %}
'admin-{{ node }}.pem',
'admin-{{ node }}-key.pem',
{% endfor %}]"
my_master_certs: ['ca-key.pem',
'admin-{{ inventory_hostname }}.pem',
'admin-{{ inventory_hostname }}-key.pem',
'apiserver.pem',
'apiserver-key.pem',
'front-proxy-ca.pem',
'front-proxy-ca-key.pem',
'front-proxy-client.pem',
'front-proxy-client-key.pem',
'service-account-key.pem',
'kube-scheduler.pem',
'kube-scheduler-key.pem',
'kube-controller-manager.pem',
'kube-controller-manager-key.pem']
all_node_certs: "['ca.pem',
{% for node in groups['k8s-cluster'] %}
'node-{{ node }}.pem',
'node-{{ node }}-key.pem',
'kube-proxy-{{ node }}.pem',
'kube-proxy-{{ node }}-key.pem',
{% endfor %}]"
my_node_certs: ['ca.pem',
'node-{{ inventory_hostname }}.pem',
'node-{{ inventory_hostname }}-key.pem',
'kube-proxy-{{ inventory_hostname }}.pem',
'kube-proxy-{{ inventory_hostname }}-key.pem']
tags:
- facts
- name: "Check certs | check if a cert already exists on node"
find:
paths: "{{ kube_cert_dir }}"
patterns: "*.pem"
get_checksum: true
register: kubecert_node
when: inventory_hostname != groups['kube-master'][0]
- name: "Check_certs | Set 'sync_certs' to true on masters"
set_fact:
sync_certs: true
when: inventory_hostname in groups['kube-master'] and
inventory_hostname != groups['kube-master'][0] and
(not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
kubecert_node.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default(''))
with_items:
- "{{ my_master_certs + all_node_certs }}"
- name: "Check_certs | Set 'sync_certs' to true on nodes"
set_fact:
sync_certs: true
when: inventory_hostname in groups['kube-node'] and
inventory_hostname != groups['kube-master'][0] and
(not item in kubecert_node.files | map(attribute='path') | map("basename") | list or
kubecert_node.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default('') != kubecert_master.files | selectattr("path", "equalto", '%s/%s'|format(kube_cert_dir, item)) | map(attribute="checksum")|first|default(''))
with_items:
- "{{ my_node_certs }}"
- name: Gen_certs | Gather master certs
shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_master_certs|join(' ') }} {{ all_node_certs|join(' ') }} | base64 --wrap=0"
args:
executable: /bin/bash
no_log: true
register: master_cert_data
check_mode: no
delegate_to: "{{groups['kube-master'][0]}}"
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Gather node certs
shell: "tar cfz - -C {{ kube_cert_dir }} -T /dev/stdin <<< {{ my_node_certs|join(' ') }} | base64 --wrap=0"
args:
executable: /bin/bash
no_log: true
register: node_cert_data
check_mode: no
delegate_to: "{{groups['kube-master'][0]}}"
when: inventory_hostname in groups['kube-node'] and
sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
# NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
# char limit when using shell command
# FIXME(mattymo): Use tempfile module in ansible 2.3
- name: Gen_certs | Prepare tempfile for unpacking certs on masters
command: mktemp /tmp/certsXXXXX.tar.gz
register: cert_tempfile
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Write master certs to tempfile
copy:
content: "{{master_cert_data.stdout}}"
dest: "{{cert_tempfile.stdout}}"
owner: root
mode: "0600"
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Unpack certs on masters
shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ kube_cert_dir }}"
no_log: true
changed_when: false
check_mode: no
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
notify: set secret_changed
- name: Gen_certs | Cleanup tempfile on masters
file:
path: "{{cert_tempfile.stdout}}"
state: absent
when: inventory_hostname in groups['kube-master'] and sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
- name: Gen_certs | Copy certs on nodes
shell: "base64 -d <<< '{{node_cert_data.stdout|quote}}' | tar xz -C {{ kube_cert_dir }}"
args:
executable: /bin/bash
no_log: true
changed_when: false
check_mode: no
when: inventory_hostname in groups['kube-node'] and
sync_certs|default(false) and
inventory_hostname != groups['kube-master'][0]
notify: set secret_changed
- name: Gen_certs | check certificate permissions
file:
path: "{{ kube_cert_dir }}"
group: "{{ kube_cert_group }}"
state: directory
owner: kube
mode: "u=rwX,g-rwx,o-rwx"
recurse: yes
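The Gather/Unpack pairs above ship certs between hosts as one base64 string because a shell task result is capped at roughly 200k characters (see the inline NOTE). The same round trip in plain shell, with illustrative file names:

    # pack on the first master: archive selected certs, flatten to one base64 line
    data=$(tar cfz - -C /etc/kubernetes/ssl ca.pem node-node2.pem node-node2-key.pem | base64 --wrap=0)
    # unpack on the target host from the transferred string
    base64 -d <<< "${data}" | tar xz -C /etc/kubernetes/ssl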

View file

@ -1,109 +0,0 @@
---
- import_tasks: check-certs.yml
tags:
- k8s-secrets
- k8s-gen-certs
- facts
- name: Make sure the certificate directory exists
file:
path: "{{ kube_cert_dir }}"
state: directory
mode: o-rwx
group: "{{ kube_cert_group }}"
#
# The following directory-creation tasks make sure that the directories
# exist on the first master, even when the play doesn't otherwise target
# the first master.
#
- name: "Gen_certs | Create kubernetes config directory (on {{groups['kube-master'][0]}})"
file:
path: "{{ kube_config_dir }}"
state: directory
owner: kube
run_once: yes
delegate_to: "{{groups['kube-master'][0]}}"
when: gen_certs|default(false)
tags:
- kubelet
- k8s-secrets
- kube-controller-manager
- kube-apiserver
- apps
- network
- master
- node
- name: "Gen_certs | Create kubernetes script directory (on {{groups['kube-master'][0]}})"
file:
path: "{{ kube_script_dir }}"
state: directory
owner: kube
run_once: yes
delegate_to: "{{groups['kube-master'][0]}}"
when: gen_certs|default(false)
tags:
- k8s-secrets
- include_tasks: "gen_certs_script.yml"
when:
- cert_management |d('script') == 'script'
tags:
- k8s-secrets
- k8s-gen-certs
- import_tasks: upd_ca_trust.yml
tags:
- k8s-secrets
- k8s-gen-certs
- name: "Gen_certs | Get certificate serials on kube masters"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
register: "master_certificate_serials"
changed_when: false
with_items:
- "admin-{{ inventory_hostname }}.pem"
- "apiserver.pem"
- "kube-controller-manager.pem"
- "kube-scheduler.pem"
when: inventory_hostname in groups['kube-master']
tags:
- master
- kubelet
- node
- name: "Gen_certs | set kube master certificate serial facts"
set_fact:
etcd_admin_cert_serial: "{{ master_certificate_serials.results[0].stdout|default() }}"
apiserver_cert_serial: "{{ master_certificate_serials.results[1].stdout|default() }}"
controller_manager_cert_serial: "{{ master_certificate_serials.results[2].stdout|default() }}"
scheduler_cert_serial: "{{ master_certificate_serials.results[3].stdout|default() }}"
when: inventory_hostname in groups['kube-master']
tags:
- master
- kubelet
- node
- name: "Gen_certs | Get certificate serials on kube nodes"
shell: "openssl x509 -in {{ kube_cert_dir }}/{{ item }} -noout -serial | cut -d= -f2"
register: "node_certificate_serials"
changed_when: false
with_items:
- "node-{{ inventory_hostname }}.pem"
- "kube-proxy-{{ inventory_hostname }}.pem"
when:
- inventory_hostname in groups['k8s-cluster']
tags:
- node
- kube-proxy
- name: "Gen_certs | set kube node certificate serial facts"
set_fact:
kubelet_cert_serial: "{{ node_certificate_serials.results[0].stdout|default() }}"
kube_proxy_cert_serial: "{{ node_certificate_serials.results[1].stdout|default() }}"
when: inventory_hostname in groups['k8s-cluster']
tags:
- kubelet
- node
- kube-proxy
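The serials gathered here feed pod annotations such as kubespray.kube-proxy-cert/serial on the kube-proxy manifest earlier in this diff, so static pods are re-created whenever their certs rotate. The underlying command, runnable by hand; the path is an assumption:

    # prints the certificate's hex serial on one line
    openssl x509 -in /etc/kubernetes/ssl/apiserver.pem -noout -serial | cut -d= -f2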

View file

@ -1,30 +0,0 @@
---
- name: Gen_certs | target ca-certificates path
set_fact:
ca_cert_path: |-
{% if ansible_os_family == "Debian" -%}
/usr/local/share/ca-certificates/kube-ca.crt
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors/kube-ca.crt
{%- elif ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] -%}
/etc/ssl/certs/kube-ca.pem
{%- elif ansible_os_family == "Suse" -%}
/etc/pki/trust/anchors/kube-ca.pem
{%- endif %}
tags:
- facts
- name: Gen_certs | add CA to trusted CA dir
copy:
src: "{{ kube_cert_dir }}/ca.pem"
dest: "{{ ca_cert_path }}"
remote_src: true
register: kube_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/SUSE/Container Linux by CoreOS)
command: update-ca-certificates
when: kube_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS", "Container Linux by CoreOS", "Suse"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
when: kube_ca_cert.changed and ansible_os_family == "RedHat"

View file

@ -1,151 +0,0 @@
#!/bin/bash
# Author: Smana smainklh@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
usage()
{
cat << EOF
Create self signed certificates
Usage : $(basename $0) -f <config> [-d <ssldir>]
-h | --help : Show this message
-f | --config : Openssl configuration file
-d | --ssldir : Directory where the certificates will be installed
Environmental variables MASTERS and HOSTS should be set to generate keys
for each host.
ex :
MASTERS=node1 HOSTS="node1 node2" $(basename $0) -f openssl.conf -d /srv/ssl
EOF
}
# Options parsing
while (($#)); do
case "$1" in
-h | --help) usage; exit 0;;
-f | --config) CONFIG=${2}; shift 2;;
-d | --ssldir) SSLDIR="${2}"; shift 2;;
*)
usage
echo "ERROR : Unknown option"
exit 3
;;
esac
done
if [ -z ${CONFIG} ]; then
echo "ERROR: the openssl configuration file is missing. option -f"
exit 1
fi
if [ -z ${SSLDIR} ]; then
SSLDIR="/etc/kubernetes/certs"
fi
tmpdir=$(mktemp -d /tmp/kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
mkdir -p "${SSLDIR}"
# Root CA
if [ -e "$SSLDIR/ca-key.pem" ]; then
# Reuse existing CA
cp $SSLDIR/{ca.pem,ca-key.pem} .
else
openssl genrsa -out ca-key.pem {{certificates_key_size}} > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca-key.pem -days {{certificates_duration}} -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1
fi
# Front proxy client CA
if [ -e "$SSLDIR/front-proxy-ca-key.pem" ]; then
# Reuse existing front proxy CA
cp $SSLDIR/{front-proxy-ca.pem,front-proxy-ca-key.pem} .
else
openssl genrsa -out front-proxy-ca-key.pem {{certificates_key_size}} > /dev/null 2>&1
openssl req -x509 -new -nodes -key front-proxy-ca-key.pem -days {{certificates_duration}} -out front-proxy-ca.pem -subj "/CN=front-proxy-ca" > /dev/null 2>&1
fi
gen_key_and_cert() {
local name=$1
local subject=$2
openssl genrsa -out ${name}-key.pem {{certificates_key_size}} > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in ${name}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${name}.pem -days {{certificates_duration}} -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
gen_key_and_cert_front_proxy() {
local name=$1
local subject=$2
openssl genrsa -out ${name}-key.pem {{certificates_key_size}} > /dev/null 2>&1
openssl req -new -key ${name}-key.pem -out ${name}.csr -subj "${subject}" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in ${name}.csr -CA front-proxy-ca.pem -CAkey front-proxy-ca-key.pem -CAcreateserial -out ${name}.pem -days {{certificates_duration}} -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1
}
# Admins
if [ -n "$MASTERS" ]; then
# service-account
# If --service-account-private-key-file was previously configured to use apiserver-key.pem then copy that to the new dedicated service-account signing key location to avoid disruptions
if [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
cp $SSLDIR/apiserver-key.pem $SSLDIR/service-account-key.pem
fi
# Generate dedicated service account signing key if one doesn't exist
if ! [ -e "$SSLDIR/apiserver-key.pem" ] && ! [ -e "$SSLDIR/service-account-key.pem" ]; then
openssl genrsa -out service-account-key.pem {{certificates_key_size}} > /dev/null 2>&1
fi
# kube-apiserver
# Generate only if we don't have existing ca and apiserver certs
if ! [ -e "$SSLDIR/ca-key.pem" ] || ! [ -e "$SSLDIR/apiserver-key.pem" ]; then
gen_key_and_cert "apiserver" "/CN=kube-apiserver"
cat ca.pem >> apiserver.pem
fi
# If any host requires new certs, just regenerate scheduler and controller-manager master certs
# kube-scheduler
gen_key_and_cert "kube-scheduler" "/CN=system:kube-scheduler"
# kube-controller-manager
gen_key_and_cert "kube-controller-manager" "/CN=system:kube-controller-manager"
# metrics aggregator
gen_key_and_cert_front_proxy "front-proxy-client" "/CN=front-proxy-client"
for host in $MASTERS; do
cn="${host}"
# admin
gen_key_and_cert "admin-${host}" "/CN=kube-admin-${cn}/O=system:masters"
done
fi
# Nodes
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
cn="${host}"
gen_key_and_cert "node-${host}" "/CN=system:node:${cn,,}/O=system:nodes"
done
fi
# system:node-proxier
if [ -n "$HOSTS" ]; then
for host in $HOSTS; do
# kube-proxy
gen_key_and_cert "kube-proxy-${host}" "/CN=system:kube-proxy/O=system:node-proxier"
done
fi
# Install certs
mv *.pem ${SSLDIR}/
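Hand-run usage, following the script's own usage() example, plus a sanity check that an issued cert chains to the generated CA; host names and paths are illustrative:

    MASTERS=node1 HOSTS="node1 node2" ./make-ssl.sh -f openssl.conf -d /etc/kubernetes/ssl
    openssl verify -CAfile /etc/kubernetes/ssl/ca.pem /etc/kubernetes/ssl/node-node1.pem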

View file

@ -1,42 +0,0 @@
{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.{{ dns_domain }}
DNS.5 = localhost
{% for host in groups['kube-master'] %}
DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
{% if apiserver_loadbalancer_domain_name is defined %}
DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
{% endif %}
{% for host in groups['kube-master'] %}
{% if hostvars[host]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
{% endfor %}
{% if kube_apiserver_ip is defined %}
IP.{{ counter["ip"] }} = {{ kube_apiserver_ip }}{{ increment(counter, 'ip') }}
{% endif %}
{% if loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined %}
IP.{{ counter["ip"] }} = {{ loadbalancer_apiserver.address }}{{ increment(counter, 'ip') }}
{% endif %}
{% if supplementary_addresses_in_ssl_keys is defined %}
{% for addr in supplementary_addresses_in_ssl_keys %}
{% if addr | ipaddr %}
IP.{{ counter["ip"] }} = {{ addr }}{{ increment(counter, 'ip') }}
{% else %}
DNS.{{ counter["dns"] }} = {{ addr }}{{ increment(counter, 'dns') }}
{% endif %}
{% endfor %}
{% endif %}
IP.{{ counter["ip"] }} = 127.0.0.1

View file

@ -1,20 +0,0 @@
{% set counter = {'dns': 6,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.{{ dns_domain }}
DNS.5 = localhost
DNS.{{ counter["dns"] }} = {{ inventory_hostname }}{{ increment(counter, 'dns') }}
{% if hostvars[inventory_hostname]['access_ip'] is defined %}
IP.{{ counter["ip"] }} = {{ hostvars[inventory_hostname]['access_ip'] }}{{ increment(counter, 'ip') }}
{% endif %}
IP.{{ counter["ip"] }} = {{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
IP.{{ counter["ip"] }} = 127.0.0.1

View file

@ -226,14 +226,10 @@ docker_options: >-
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
helm_deployment_type: host
# Enable kubeadm deployment
kubeadm_enabled: true
# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
kubeconfig_localhost: false
# Download kubectl onto the host that runs Ansible in {{ bin_dir }}
@ -282,7 +278,7 @@ openstack_cacert: "{{ lookup('env','OS_CACERT') }}"
## the k8s cluster. Only 'AlwaysAllow', 'AlwaysDeny', 'Node' and
## 'RBAC' modes are tested. Order is important.
authorization_modes: ['Node', 'RBAC']
rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
rbac_enabled: "{{ 'RBAC' in authorization_modes }}"
# When enabled, API bearer tokens (including service account tokens) can be used to authenticate to the kubelets HTTPS endpoint
kubelet_authentication_token_webhook: true
@ -395,18 +391,8 @@ kube_apiserver_endpoint: |-
{%- endif %}
kube_apiserver_insecure_endpoint: >-
http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
kube_apiserver_client_cert: |-
{% if kubeadm_enabled -%}
{{ kube_cert_dir }}/ca.crt
{%- else -%}
{{ kube_cert_dir }}/apiserver.pem
{%- endif %}
kube_apiserver_client_key: |-
{% if kubeadm_enabled -%}
{{ kube_cert_dir }}/ca.key
{%- else -%}
{{ kube_cert_dir }}/apiserver-key.pem
{%- endif %}
kube_apiserver_client_cert: "{{ kube_cert_dir }}/ca.crt"
kube_apiserver_client_key: "{{ kube_cert_dir }}/ca.key"
# Set to true to deploy etcd-events cluster
etcd_events_cluster_enabled: false
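With kubeadm the client cert/key now point unconditionally at the cluster CA pair, which the CI API check uses as a client identity. A hedged smoke test against the apiserver; endpoint, port, and cert directory are assumptions:

    curl --cacert /etc/kubernetes/ssl/ca.crt \
      --cert /etc/kubernetes/ssl/ca.crt --key /etc/kubernetes/ssl/ca.key \
      https://127.0.0.1:6443/healthz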

View file

@ -2,7 +2,7 @@
- name: remove-node | Drain node except daemonsets resource
command: >-
{{ bin_dir }}/kubectl drain
{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf drain
--force
--ignore-daemonsets
--grace-period {{ drain_grace_period }}

View file

@ -1,6 +1,6 @@
---
- name: Uncordon node
command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf uncordon {{ inventory_hostname }}"
delegate_to: "{{ groups['kube-master'][0] }}"
when:
- needs_cordoning|default(false)

View file

@ -15,11 +15,11 @@
dest: "{{ kubernetes_user_manifests_path }}/hostnameOverride-patch.json" dest: "{{ kubernetes_user_manifests_path }}/hostnameOverride-patch.json"
- name: Check current command for kube-proxy daemonset - name: Check current command for kube-proxy daemonset
shell: "{{bin_dir}}/kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.containers[0].command}'" shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.containers[0].command}'"
register: current_kube_proxy_command register: current_kube_proxy_command
- name: Apply hostnameOverride patch for kube-proxy daemonset - name: Apply hostnameOverride patch for kube-proxy daemonset
shell: "{{bin_dir}}/kubectl patch ds kube-proxy --namespace=kube-system --type=json -p \"$(cat hostnameOverride-patch.json)\"" shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=json -p \"$(cat hostnameOverride-patch.json)\""
args: args:
chdir: "{{ kubernetes_user_manifests_path }}" chdir: "{{ kubernetes_user_manifests_path }}"
register: patch_kube_proxy_command register: patch_kube_proxy_command
@ -43,11 +43,11 @@
# Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch" # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
- name: Check current nodeselector for kube-proxy daemonset - name: Check current nodeselector for kube-proxy daemonset
shell: "{{bin_dir}}/kubectl get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'" shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
register: current_kube_proxy_state register: current_kube_proxy_state
- name: Apply nodeselector patch for kube-proxy daemonset - name: Apply nodeselector patch for kube-proxy daemonset
shell: "{{bin_dir}}/kubectl patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\"" shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
args: args:
chdir: "{{ kubernetes_user_manifests_path }}" chdir: "{{ kubernetes_user_manifests_path }}"
register: patch_kube_proxy_state register: patch_kube_proxy_state
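For reference, a hypothetical shape of the hostnameOverride-patch.json used above (the real file is templated earlier in the play), applied with the same kubectl invocation:

    # hypothetical JSON patch appending a flag to the kube-proxy command
    cat > hostnameOverride-patch.json <<'EOF'
    [{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--hostname-override=node1"}]
    EOF
    kubectl --kubeconfig /etc/kubernetes/admin.conf patch ds kube-proxy --namespace=kube-system --type=json -p "$(cat hostnameOverride-patch.json)"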

View file

@ -13,19 +13,6 @@
vars:
ansible_connection: local
- hosts: localhost
tasks:
- name: deploy warning for non kubeadm
debug:
msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
when: not kubeadm_enabled and not skip_non_kubeadm_warning
- name: deploy cluster for non kubeadm
pause:
prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
echo: no
when: not kubeadm_enabled and not skip_non_kubeadm_warning
- hosts: bastion[0]
gather_facts: False
roles:
@ -66,6 +53,6 @@
- { role: download, tags: download, when: "not skip_downloads" }
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: network_plugin, tags: network }
environment: "{{proxy_env}}"

View file

@ -18,6 +18,21 @@
k8s-{{test_name}}-1,k8s-{{test_name}}-2
{%- endif -%}
- name: stop gce instances
gce:
instance_names: "{{instance_names}}"
image: "{{ cloud_image | default(omit) }}"
service_account_email: "{{ gce_service_account_email }}"
pem_file: "{{ gce_pem_file | default(omit)}}"
credentials_file: "{{gce_credentials_file | default(omit)}}"
project_id: "{{ gce_project_id }}"
zone: "{{cloud_region | default('europe-west1-b')}}"
state: 'stopped'
async: 120
poll: 3
retries: 3
register: gce
- name: delete gce instances
gce:
instance_names: "{{instance_names}}"

View file

@ -6,7 +6,7 @@ mode: ha
# Deployment settings
kube_network_plugin: flannel
kubeadm_enabled: false
kubeadm_enabled: true
skip_non_kubeadm_warning: true
deploy_netchecker: true
dns_min_replicas: 1

View file

@ -9,4 +9,3 @@
password: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}" password: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
validate_certs: no validate_certs: no
status_code: 200,401 status_code: 200,401
when: not kubeadm_enabled|default(false)

View file

@ -13,19 +13,6 @@
vars:
ansible_connection: local
- hosts: localhost
tasks:
- name: deploy warning for non kubeadm
debug:
msg: "DEPRECATION: non-kubeadm deployment is deprecated from v2.9. Will be removed in next release."
when: not kubeadm_enabled and not skip_non_kubeadm_warning
- name: deploy cluster for non kubeadm
pause:
prompt: "Are you sure you want to deploy cluster using the deprecated non-kubeadm mode."
echo: no
when: not kubeadm_enabled and not skip_non_kubeadm_warning
- hosts: bastion[0]
gather_facts: False
roles:
@ -109,7 +96,7 @@
- { role: kubespray-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node }
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
- { role: kubernetes/kubeadm, tags: kubeadm }
- { role: upgrade/post-upgrade, tags: post-upgrade }
environment: "{{proxy_env}}"