From 6fa44458db80dc6929a9b64cceee2d6a59c67664 Mon Sep 17 00:00:00 2001
From: Bogdan Dobrelya
Date: Tue, 18 Oct 2016 15:03:07 +0200
Subject: [PATCH] Add kubeadm init, join

* Implement kubeadm init/join for the Debian OS family (PoC) with the
  external etcd option set.
* Make certs/tokens management optional, depending on the use_kubeadm var.
* Do not delegate static pod and config management to kubeadm; remove the
  artifacts it produces so Ansible can regenerate them.
* Add a new set of system pod manifest templates based on those kubeadm
  produces by default, parametrized by Ansible vars.
* Fix apiserver container logging to follow the 12-factor convention
  (log to stdout/stderr instead of a log file), matching the
  scheduler/controller-manager logging setup.

Signed-off-by: Bogdan Dobrelya
---
 cluster.yml                                        |  2 +
 inventory/group_vars/all.yml                       |  5 +
 roles/download/defaults/main.yml                   | 26 +++--
 .../kube-join/tasks/kubeadm-system-pods.yml        |  8 ++
 roles/kubernetes/kube-join/tasks/main.yml          | 18 ++++
 .../manifests/kubeadm-kube-proxy.json.j2           | 46 +++++++++
 roles/kubernetes/master/tasks/kube-init.yml        | 25 +++++
 .../master/tasks/kubeadm-system-pods.yml           | 21 ++++
 roles/kubernetes/master/tasks/main.yml             | 43 ++------
 roles/kubernetes/master/tasks/system-pods.yml      | 38 +++++++
 .../manifests/kube-apiserver.manifest.j2           | 10 +-
 .../manifests/kubeadm-kube-apiserver.json.j2       | 97 ++++++++++++++++++
 .../kubeadm-kube-controller-manager.json.j2        | 88 +++++++++++++++++
 .../manifests/kubeadm-kube-scheduler.json.j2       | 45 +++++++++
 roles/kubernetes/node/meta/main.yml                |  3 +
 roles/kubernetes/node/tasks/install.yml            |  7 ++
 roles/kubernetes/node/tasks/main.yml               |  2 +
 .../secrets/files/kube-gen-token.sh                |  2 +-
 .../kubernetes/secrets/tasks/check-tokens.yml      |  2 +-
 19 files changed, 437 insertions(+), 51 deletions(-)
 create mode 100644 roles/kubernetes/kube-join/tasks/kubeadm-system-pods.yml
 create mode 100644 roles/kubernetes/kube-join/tasks/main.yml
 create mode 100644 roles/kubernetes/kube-join/templates/manifests/kubeadm-kube-proxy.json.j2
 create mode 100644 roles/kubernetes/master/tasks/kube-init.yml
 create mode 100644 roles/kubernetes/master/tasks/kubeadm-system-pods.yml
 create mode 100644 roles/kubernetes/master/tasks/system-pods.yml
 create mode 100644 roles/kubernetes/master/templates/manifests/kubeadm-kube-apiserver.json.j2
 create mode 100644 roles/kubernetes/master/templates/manifests/kubeadm-kube-controller-manager.json.j2
 create mode 100644 roles/kubernetes/master/templates/manifests/kubeadm-kube-scheduler.json.j2

diff --git a/cluster.yml b/cluster.yml
index 208382354..30e704eef 100644
--- a/cluster.yml
+++ b/cluster.yml
@@ -29,6 +29,8 @@
 
 - hosts: k8s-cluster
   roles:
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: kubernetes/kube-join, tags: kube-join }
     - { role: dnsmasq, tags: dnsmasq }
 
 - hosts: kube-master[0]

diff --git a/inventory/group_vars/all.yml b/inventory/group_vars/all.yml
index cbf2e63a2..bf07543e4 100644
--- a/inventory/group_vars/all.yml
+++ b/inventory/group_vars/all.yml
@@ -18,6 +18,11 @@ retry_stagger: 5
 # cert files to. Not really changable...
 kube_cert_group: kube-cert
+# Enables the native kubeadm tooling and delegates certs management to it
+use_kubeadm: true
+# Initial cluster token for kubeadm
+init_token: fa7ed3.c5debcef8dd01970
+
 # Cluster Loglevel configuration
 kube_log_level: 2

diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index cbe053fa0..12780d6ac 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -6,6 +6,9 @@ download_run_once: False
 
 # Versions
 kube_version: v1.4.3
+# TODO(bogdando) figure out a better way to download the kubeadm binary for all supported OS types
+# See https://packages.cloud.google.com/apt/dists/kubernetes-xenial/main/binary-amd64/Packages
+kubeadm_version: 1.5.0-alpha.0-1534-gcf7301f-00
 etcd_version: v3.0.6
 
 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
@@ -17,17 +20,19 @@ flannel_version: v0.6.2
 flannel_server_helper_version: 0.1
 pod_infra_version: 3.0
 
-# Download URL's
-etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
-calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
-calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
-weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
-
 # Checksums
 calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
 calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
 weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
+kubeadm_checksum: "9af7c4e3a0daa4f8b2463c1bd39fe28d6b68535042777bca89c917a0e4ebdbf7"
+
+# Download URLs
+etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
+calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
+calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
+weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
+kubeadm_download_url: "http://apt.kubernetes.io/pool/kubeadm_{{kubeadm_version}}_amd64_{{kubeadm_checksum}}.deb"
 
 # Containers
 # Possible values: host, docker
@@ -48,6 +53,15 @@ pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 
 downloads:
+  kubeadm:
+    dest: kubeadm/deb/kubeadm.deb
+    version: "{{kubeadm_version}}"
+    sha256: "{{ kubeadm_checksum }}"
+    source_url: "{{ kubeadm_download_url }}"
+    url: "{{ kubeadm_download_url }}"
+    owner: "root"
+    mode: "0755"
+    enabled: "{{ use_kubeadm|bool }}"
   calico_cni_plugin:
     dest: calico/bin/calico
    version: "{{calico_cni_version}}"

diff --git a/roles/kubernetes/kube-join/tasks/kubeadm-system-pods.yml b/roles/kubernetes/kube-join/tasks/kubeadm-system-pods.yml
new file mode 100644
index 000000000..04cd3399e
--- /dev/null
+++ b/roles/kubernetes/kube-join/tasks/kubeadm-system-pods.yml
@@ -0,0 +1,8 @@
+---
+- set_fact:
+    kubeadm_certs_dir: "{{ kube_config_dir }}/pki"
+
+- name: Write kube-proxy manifest
+  template:
+    src: manifests/kubeadm-kube-proxy.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"

diff --git a/roles/kubernetes/kube-join/tasks/main.yml b/roles/kubernetes/kube-join/tasks/main.yml
new file mode 100644
index 000000000..baa105371
--- /dev/null
+++ b/roles/kubernetes/kube-join/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# TODO(bogdando) kubeadm init/join/taint on RedHat/CoreOS
+- name: join workers to the initialized kubernetes cluster
+  command: "kubeadm join --token {{ init_token }} {{ first_kube_master }}"
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool
+
+# NOTE(bogdando) for now, we want ansible to control manifests instead of kubeadm
+- name: purge kube manifests created by kubeadm
+  file: path="{{ kube_manifest_dir }}/{{ item }}.json" state=absent
+  with_items: [ "kube-proxy" ]
+
+- name: allow all nodes to be picked by schedulers
+  command: "kubectl taint nodes --all dedicated-"
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool
+  ignore_errors: true
+
+- include: kubeadm-system-pods.yml
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool

diff --git a/roles/kubernetes/kube-join/templates/manifests/kubeadm-kube-proxy.json.j2 b/roles/kubernetes/kube-join/templates/manifests/kubeadm-kube-proxy.json.j2
new file mode 100644
index 000000000..7abffe053
--- /dev/null
+++ b/roles/kubernetes/kube-join/templates/manifests/kubeadm-kube-proxy.json.j2
@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-proxy
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-proxy
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - proxy
+    - --v={{ kube_log_level | default('2') }}
+    - --master={{ kube_apiserver_endpoint }}
+{% if not is_kube_master %}
+    - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
+{% endif %}
+    - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
+    - --cluster-cidr={{ kube_pods_subnet }}
+    - --proxy-mode={{ kube_proxy_mode }}
+{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
+    - --masquerade-all
+{% endif %}
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+    - mountPath: /etc/kubernetes/node-kubeconfig.yaml
+      name: "kubeconfig"
+      readOnly: true
+    - mountPath: /etc/kubernetes/ssl
+      name: "etc-kube-ssl"
+      readOnly: true
+  volumes:
+  - name: ssl-certs-host
+    hostPath:
+      path: /usr/share/ca-certificates
+  - name: "kubeconfig"
+    hostPath:
+      path: "/etc/kubernetes/node-kubeconfig.yaml"
+  - name: "etc-kube-ssl"
+    hostPath:
+      path: "/etc/kubernetes/ssl"

diff --git a/roles/kubernetes/master/tasks/kube-init.yml b/roles/kubernetes/master/tasks/kube-init.yml
new file mode 100644
index 000000000..c3677d80e
--- /dev/null
+++ b/roles/kubernetes/master/tasks/kube-init.yml
@@ -0,0 +1,25 @@
+---
+- name: check for bootstrap
+  command: kubectl get nodes
+  register: kube_bootstrap
+  ignore_errors: true
+
+- name: initialize the kubernetes master
+  command: "kubeadm init --token {{ init_token }} \
+           --api-advertise-addresses {{ kube_apiserver_access_address }} \
+           --service-cidr {{ kube_service_addresses }} \
+           --external-etcd-endpoints {{ etcd_access_addresses }} \
+           --service-dns-domain {{ cluster_name }} \
+           {% if cloud_provider is defined %}--cloud-provider {{ cloud_provider }}{% endif %}"
+  when: "'localhost:8080 was refused' in kube_bootstrap.stderr"
+  register: initout
+  ignore_errors: true
+
+# NOTE(bogdando) for now, we want ansible to control configs/manifests instead of kubeadm
+- name: purge kube manifests created by kubeadm
+  file: path="{{ kube_manifest_dir }}/{{ item }}.json" state=absent
+  with_items: [ "kube-controller-manager", "kube-apiserver", "kube-scheduler" ]
+
+- name: purge kube configs created by kubeadm
+  file: path="{{ kube_config_dir }}/{{ item }}.conf" state=absent
+  with_items: [ "kubelet", "admin" ]

diff --git a/roles/kubernetes/master/tasks/kubeadm-system-pods.yml b/roles/kubernetes/master/tasks/kubeadm-system-pods.yml
new file mode 100644
index 000000000..bf8e6310c
--- /dev/null
+++ b/roles/kubernetes/master/tasks/kubeadm-system-pods.yml
@@ -0,0 +1,21 @@
+---
+- set_fact:
+    kubeadm_certs_dir: "{{ kube_config_dir }}/pki"
+
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kubeadm-kube-apiserver.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running
+
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kubeadm-kube-controller-manager.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kubeadm-kube-scheduler.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
+  notify: Master | wait for kube-scheduler

diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml
index 419be1f5a..b91521bde 100644
--- a/roles/kubernetes/master/tasks/main.yml
+++ b/roles/kubernetes/master/tasks/main.yml
@@ -15,40 +15,13 @@
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false
 
-- name: Write kube-apiserver manifest
-  template:
-    src: manifests/kube-apiserver.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  notify: Master | wait for the apiserver to be running
+- include: system-pods.yml
+  when: not use_kubeadm|bool
 
-- meta: flush_handlers
-
-# Create kube-system namespace
-- name: copy 'kube-system' namespace manifest
-  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
-  run_once: yes
-  when: inventory_hostname == groups['kube-master'][0]
+# TODO(bogdando) kubeadm init/update pods on RedHat/CoreOS
+- include: kube-init.yml
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool and inventory_hostname == groups['kube-master'][0]
+  run_once: true
 
-- name: Check if kube-system exists
-  command: "{{ bin_dir }}/kubectl get ns kube-system"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  run_once: yes
-
-- name: Create 'kube-system' namespace
-  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
-  changed_when: False
-  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
-
-# Write other manifests
-- name: Write kube-controller-manager manifest
-  template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | wait for kube-controller-manager
-
-- name: Write kube-scheduler manifest
-  template:
-    src: manifests/kube-scheduler.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: Master | wait for kube-scheduler
+- include: kubeadm-system-pods.yml
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool

diff --git a/roles/kubernetes/master/tasks/system-pods.yml b/roles/kubernetes/master/tasks/system-pods.yml
new file mode 100644
index 000000000..71f8fedd8
--- /dev/null
+++ b/roles/kubernetes/master/tasks/system-pods.yml
@@ -0,0 +1,38 @@
+---
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kube-apiserver.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running
+
+- meta: flush_handlers
+
+# Create kube-system namespace
+- name: copy 'kube-system' namespace manifest
+  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
+  run_once: yes
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Check if kube-system exists
+  command: "{{ bin_dir }}/kubectl get ns kube-system"
+  register: 'kubesystem'
+  changed_when: False
+  failed_when: False
+  run_once: yes
+
+- name: Create 'kube-system' namespace
+  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
+  changed_when: False
+  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
+
+# Write other manifests
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kube-controller-manager.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kube-scheduler.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
+  notify: Master | wait for kube-scheduler

diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index ddd6f2085..9ce1433a7 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -19,10 +19,10 @@ spec:
     - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
-    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
+    - --basic-auth-file={{ kube_users_dir }}/tokens.csv
     - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
     - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
-    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
+    - --token-auth-file={{ kube_token_dir }}/tokens.csv
     - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --secure-port={{ kube_apiserver_port }}
     - --insecure-port={{ kube_apiserver_insecure_port }}
@@ -42,7 +42,6 @@ spec:
 {% elif cloud_provider is defined and cloud_provider == "aws" %}
     - --cloud-provider={{ cloud_provider }}
 {% endif %}
-    - 2>&1 >> {{ kube_log_dir }}/kube-apiserver.log
     volumeMounts:
     - mountPath: {{ kube_config_dir }}
       name: kubernetes-config
@@ -50,8 +49,6 @@ spec:
     - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
-    - mountPath: /var/log/
-      name: logfile
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
@@ -59,6 +56,3 @@ spec:
  - hostPath:
      path: /etc/ssl/certs/
    name: ssl-certs-host
-  - hostPath:
-      path: /var/log/
-    name: logfile

diff --git a/roles/kubernetes/master/templates/manifests/kubeadm-kube-apiserver.json.j2 b/roles/kubernetes/master/templates/manifests/kubeadm-kube-apiserver.json.j2
new file mode 100644
index 000000000..acc1f8d6f
--- /dev/null
+++ b/roles/kubernetes/master/templates/manifests/kubeadm-kube-apiserver.json.j2
@@ -0,0 +1,97 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "kube-apiserver",
+    "namespace": "kube-system",
+    "creationTimestamp": null,
+    "labels": {
+      "component": "kube-apiserver",
+      "tier": "control-plane"
+    }
+  },
+  "spec": {
+    "volumes": [
+      {
+        "name": "certs",
+        "hostPath": {
+          "path": "/etc/ssl/certs"
+        }
+      },
+      {
+        "name": "pki",
+        "hostPath": {
+          "path": "{{ kube_config_dir }}"
+        }
+      }
+    ],
+    "containers": [
+      {
+        "name": "kube-apiserver",
+        "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}",
+        "command": [
"/hyperkube", + "apiserver", + "--v={{ kube_log_level | default('2') }}", + "--advertise-address={{ ip | default(ansible_default_ipv4.address) }}", + "--apiserver-count={{ kube_apiserver_count }}", + "--insecure-bind-address={{ kube_apiserver_insecure_bind_address }}", + "--etcd-servers={{ etcd_access_endpoint }}", + "--etcd-quorum-read=true", + "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota", + "--service-cluster-ip-range={{ kube_service_addresses }}", + "--service-account-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem", + "--client-ca-file={{ kubeadm_certs_dir }}/ca.pem", + "--tls-cert-file={{ kubeadm_certs_dir }}/apiserver.pem", + "--tls-private-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem", + "--token-auth-file={{ kubeadm_certs_dir }}/tokens.csv", + "--basic-auth-file={{ kubeadm_certs_dir }}/tokens.csv", + "--secure-port={{ kube_apiserver_port }}", +{% if kube_api_runtime_config is defined %} +{% for conf in kube_api_runtime_config %} + "--runtime-config={{ conf }}", +{% endfor %} +{% endif %} +{% if enable_network_policy is defined and enable_network_policy == True %} + "--runtime-config=extensions/v1beta1/networkpolicies=true", +{% endif %} + "--v={{ kube_log_level | default('2') }}", + "--allow-privileged=true", +{% if cloud_provider is defined and cloud_provider == "openstack" %} + "--cloud-provider={{ cloud_provider }}", + "--cloud-config={{ kube_config_dir }}/cloud_config", +{% elif cloud_provider is defined and cloud_provider == "aws" %} + "--cloud-provider={{ cloud_provider }}," +{% endif %} + "--insecure-port={{ kube_apiserver_insecure_port }}" + ], + "resources": { + "requests": { + "cpu": "250m" + } + }, + "volumeMounts": [ + { + "name": "certs", + "mountPath": "/etc/ssl/certs" + }, + { + "name": "pki", + "readOnly": true, + "mountPath": "{{ kube_config_dir }}" + } + ], + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": {{ kube_apiserver_insecure_port }}, + "host": "{{ kube_apiserver_insecure_bind_address }}" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ], + "hostNetwork": true + }, + "status": {} +} diff --git a/roles/kubernetes/master/templates/manifests/kubeadm-kube-controller-manager.json.j2 b/roles/kubernetes/master/templates/manifests/kubeadm-kube-controller-manager.json.j2 new file mode 100644 index 000000000..790f56e66 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kubeadm-kube-controller-manager.json.j2 @@ -0,0 +1,88 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "kube-controller-manager", + "namespace": "kube-system", + "creationTimestamp": null, + "labels": { + "component": "kube-controller-manager", + "tier": "control-plane" + } + }, + "spec": { + "volumes": [ +{% if cloud_provider is defined and cloud_provider == "openstack" %} + { + "name": "cloudconfig", + "hostPath": { + "path": "{{ kube_config_dir }}/cloud_config" + } + }, +{% endif %} + { + "name": "pki", + "hostPath": { + "path": "{{ kube_config_dir }}" + } + } + ], + "containers": [ + { + "name": "kube-controller-manager", + "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}", + "command": [ + "/hyperkube", + "controller-manager", + "--v={{ kube_log_level | default('2') }}", + "--address=127.0.0.1", + "--leader-elect", + "--master={{ kube_apiserver_endpoint }}", + "--enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}", + "--cluster-name=kubernetes", + "--root-ca-file={{ kubeadm_certs_dir }}/ca.pem", + 
"--service-account-private-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem", + "--cluster-signing-cert-file={{ kubeadm_certs_dir }}/ca.pem", + "--cluster-signing-key-file={{ kubeadm_certs_dir }}/ca-key.pem", +{% if cloud_provider is defined and cloud_provider == "openstack" %} + "--cloud-provider={{cloud_provider}}", + "--cloud-config={{ kube_config_dir }}/cloud_config", +{% elif cloud_provider is defined and cloud_provider == "aws" %} + "--cloud-provider={{cloud_provider}}", +{% endif %} + "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap" + ], + "resources": { + "requests": { + "cpu": "200m" + } + }, + "volumeMounts": [ +{% if cloud_provider is defined and cloud_provider == "openstack" %} + { + "name": "cloudconfig", + "readOnly": true, + "mountPath": "{{ kube_config_dir }}/cloud_config" + }, +{% endif %} + { + "name": "pki", + "readOnly": true, + "mountPath": "{{ kube_config_dir }}" + } + ], + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 10252, + "host": "127.0.0.1" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ], + "hostNetwork": true + }, + "status": {} +} diff --git a/roles/kubernetes/master/templates/manifests/kubeadm-kube-scheduler.json.j2 b/roles/kubernetes/master/templates/manifests/kubeadm-kube-scheduler.json.j2 new file mode 100644 index 000000000..c4438e809 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kubeadm-kube-scheduler.json.j2 @@ -0,0 +1,45 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "kube-scheduler", + "namespace": "kube-system", + "creationTimestamp": null, + "labels": { + "component": "kube-scheduler", + "tier": "control-plane" + } + }, + "spec": { + "containers": [ + { + "name": "kube-scheduler", + "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}", + "command": [ + "/hyperkube", + "scheduler", + "--v={{ kube_log_level | default('2') }}", + "--address=127.0.0.1", + "--leader-elect", + "--master={{ kube_apiserver_endpoint }}" + ], + "resources": { + "requests": { + "cpu": "100m" + } + }, + "livenessProbe": { + "httpGet": { + "path": "/healthz", + "port": 10251, + "host": "127.0.0.1" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + } + } + ], + "hostNetwork": true + }, + "status": {} +} diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml index 9c52b2d80..926232c8c 100644 --- a/roles/kubernetes/node/meta/main.yml +++ b/roles/kubernetes/node/meta/main.yml @@ -2,6 +2,9 @@ dependencies: - role: download file: "{{ downloads.hyperkube }}" + - role: download + file: "{{ downloads.kubeadm }}" - role: download file: "{{ downloads.pod_infra }}" - role: kubernetes/secrets + when: ({{ not use_kubeadm|bool }}) diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index 6b5fe5bb4..53bc0ca75 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -17,3 +17,10 @@ - name: install | Install kubelet launch script template: src=kubelet-container.j2 dest="{{ bin_dir }}/kubelet" owner=kube mode=0755 backup=yes notify: restart kubelet + +# TODO(bogdando) rework to consume the kubeadm from hypercube. +# This hack works for debian OS family only and complements the downloads role. 
+- name: install | Copy kubeadm binary from the download dir
+  command: sh -c "dpkg -x {{local_release_dir}}/kubeadm/deb/kubeadm.deb {{local_release_dir}}/kubeadm &&
+           rsync -piu {{local_release_dir}}/kubeadm/usr/bin/kubeadm {{ bin_dir }}/kubeadm"
+  when: ansible_os_family in ["Debian"] and use_kubeadm|bool

diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml
index a8cb6ce5a..50128f889 100644
--- a/roles/kubernetes/node/tasks/main.yml
+++ b/roles/kubernetes/node/tasks/main.yml
@@ -1,4 +1,5 @@
 ---
+# TODO(bogdando) rework: the kubeadm flow overlaps with the kubelet setup tasks
 - include: install.yml
 
 - include: nginx-proxy.yml
@@ -25,6 +26,7 @@
   template:
     src: manifests/kube-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
+  when: not use_kubeadm|bool
 
 # reload-systemd
 - meta: flush_handlers

diff --git a/roles/kubernetes/secrets/files/kube-gen-token.sh b/roles/kubernetes/secrets/files/kube-gen-token.sh
index 121b52263..09099a7e2 100755
--- a/roles/kubernetes/secrets/files/kube-gen-token.sh
+++ b/roles/kubernetes/secrets/files/kube-gen-token.sh
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
-token_file="${token_dir}/known_tokens.csv"
+token_file="${token_dir}/tokens.csv"
 
 create_accounts=($@)

diff --git a/roles/kubernetes/secrets/tasks/check-tokens.yml b/roles/kubernetes/secrets/tasks/check-tokens.yml
index 1ecaa7006..3bb5386d4 100644
--- a/roles/kubernetes/secrets/tasks/check-tokens.yml
+++ b/roles/kubernetes/secrets/tasks/check-tokens.yml
@@ -1,7 +1,7 @@
 ---
 - name: "Check_tokens | check if the tokens have already been generated on first master"
   stat:
-    path: "{{ kube_token_dir }}/known_tokens.csv"
+    path: "{{ kube_token_dir }}/tokens.csv"
   delegate_to: "{{groups['kube-master'][0]}}"
   register: known_tokens_master
   run_once: true
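How to test (a minimal sketch; the inventory path and the override token below
are illustrative assumptions, not values taken from this patch):

    # Bootstrap the cluster via kubeadm (use_kubeadm defaults to true in
    # inventory/group_vars/all.yml after this patch):
    ansible-playbook -i inventory/inventory.cfg cluster.yml

    # Fall back to the classic Ansible-managed certs/tokens flow:
    ansible-playbook -i inventory/inventory.cfg cluster.yml -e use_kubeadm=false

    # Supply a site-specific bootstrap token instead of the committed default
    # (kubeadm tokens follow the <6 chars>.<16 chars> format); extra-vars
    # take precedence over the group_vars default:
    ansible-playbook -i inventory/inventory.cfg cluster.yml \
      -e init_token=0f7e61.43dae1eb2bad84f4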