Add kubeadm init, join

* Implement kubeadm init/join for the Debian OS family (PoC), with
  the external etcd option set.
* Make certs/tokens management optional, gated by the use_kubeadm var.
* Do not delegate static pod and config management to kubeadm; remove
  the artifacts it produces so that Ansible can regenerate them.
* Add a new set of system pod manifest templates, based on what kubeadm
  produces by default, and parametrize them with Ansible vars.
* Fix apiserver container logging to follow the 12-factor app model,
  and fix the scheduler/controller-manager logging setup.

Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
Author: Bogdan Dobrelya <bdobrelia@mirantis.com>
Date:   2016-10-18 15:03:07 +02:00
Parent: a423927ac9
Commit: 6fa44458db

19 changed files with 438 additions and 51 deletions


@@ -29,6 +29,8 @@
 - hosts: k8s-cluster
   roles:
     - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: kubernetes/kube-join, tags: kube-join }
     - { role: dnsmasq, tags: dnsmasq }

 - hosts: kube-master[0]


@@ -18,6 +18,11 @@ retry_stagger: 5
 # cert files to. Not really changable...
 kube_cert_group: kube-cert

+# Enables native tooling and delegates certs management to it
+use_kubeadm: true
+
+# Initial cluster token for kubeadm
+init_token: fa7ed3.c5debcef8dd01970
+
 # Cluster Loglevel configuration
 kube_log_level: 2
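
The default init_token above is a shared example value; anything beyond a PoC
should override it per cluster. A minimal sketch (not part of this commit) of
generating a token in the six-by-sixteen character format kubeadm expects; the
task names and placement are illustrative:

- name: Generate a kubeadm-compatible bootstrap token
  command: >
    python -c "import random, string;
    chars = string.ascii_lowercase + string.digits;
    print('.'.join([''.join([random.choice(chars) for _ in range(n)]) for n in (6, 16)]))"
  register: generated_token
  run_once: true

- set_fact:
    init_token: "{{ generated_token.stdout }}"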


@@ -6,6 +6,9 @@ download_run_once: False

 # Versions
 kube_version: v1.4.3
+# TODO(bogdando) figure out a better way to download kubeadm binary for all supported OS types
+# See https://packages.cloud.google.com/apt/dists/kubernetes-xenial/main/binary-amd64/Packages
+kubeadm_version: 1.5.0-alpha.0-1534-gcf7301f-00
 etcd_version: v3.0.6

 #TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
@@ -17,17 +20,19 @@ flannel_version: v0.6.2
 flannel_server_helper_version: 0.1
 pod_infra_version: 3.0

-# Download URL's
-etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
-calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
-calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
-weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
-
 # Checksums
 calico_cni_checksum: "9cab29764681e9d80da826e4b2cd10841cc01a749e0018867d96dd76a4691548"
 calico_cni_ipam_checksum: "09d076b15b791956efee91646e47fdfdcf382db16082cef4f542a9fff7bae172"
 weave_checksum: "9bf9d6e5a839e7bcbb28cc00c7acae9d09284faa3e7a3720ca9c2b9e93c68580"
 etcd_checksum: "385afd518f93e3005510b7aaa04d38ee4a39f06f5152cd33bb86d4f0c94c7485"
+kubeadm_checksum: "9af7c4e3a0daa4f8b2463c1bd39fe28d6b68535042777bca89c917a0e4ebdbf7"
+
+# Download URL's
+etcd_download_url: "https://storage.googleapis.com/kargo/{{etcd_version}}_etcd"
+calico_cni_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin"
+calico_cni_ipam_download_url: "https://storage.googleapis.com/kargo/{{calico_cni_version}}_calico-cni-plugin-ipam"
+weave_download_url: "https://storage.googleapis.com/kargo/{{weave_version}}_weave"
+kubeadm_download_url: "http://apt.kubernetes.io/pool/kubeadm_{{kubeadm_version}}_amd64_{{kubeadm_checksum}}.deb"

 # Containers
 # Possible values: host, docker
@@ -48,6 +53,15 @@ pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"

 downloads:
+  kubeadm:
+    dest: kubeadm/deb/kubeadm.deb
+    version: "{{kubeadm_version}}"
+    sha256: "{{ kubeadm_checksum }}"
+    source_url: "{{ kubeadm_download_url }}"
+    url: "{{ kubeadm_download_url }}"
+    owner: "root"
+    mode: "0755"
+    enabled: "{{ use_kubeadm|bool }}"
   calico_cni_plugin:
     dest: calico/bin/calico
     version: "{{calico_cni_version}}"
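
For reference, entries in the downloads dict above are consumed by the shared
download role. The kubeadm entry should, roughly, have the effect of the
following sketch; the get_url call is an assumption about that role's
behavior, not code from this commit:

- name: Download the kubeadm deb and verify its checksum (sketch)
  get_url:
    url: "{{ downloads.kubeadm.url }}"
    dest: "{{ local_release_dir }}/{{ downloads.kubeadm.dest }}"
    checksum: "sha256:{{ downloads.kubeadm.sha256 }}"
    owner: "{{ downloads.kubeadm.owner }}"
    mode: "{{ downloads.kubeadm.mode }}"
  when: downloads.kubeadm.enabled|bool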


@@ -0,0 +1,8 @@
+---
+- set_fact:
+    kubeadm_certs_dir: "{{ kube_config_dir }}/pki"
+
+- name: Write kube-proxy manifest
+  template:
+    src: manifests/kubeadm-kube-proxy.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"


@@ -0,0 +1,18 @@
+---
+# TODO(bogdando) kubeadm init/join/taint on RedHat/CoreOS
+- name: join workers to the initialized kubernetes cluster
+  command: "kubeadm join --token {{ init_token }} {{ first_kube_master }}"
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }})
+
+# NOTE(bogdando) we want ansible to control manifests instead of kubeadm, yet
+- name: purge kube manifests created by kubeadm
+  file: path="{{ kube_manifest_dir }}/{{ item }}.json" state=absent
+  with_items: [ "kube-proxy" ]
+
+- name: allow all nodes to be picked by schedulers
+  command: "kubectl taint nodes --all dedicated-"
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }})
+  ignore_errors: true
+
+- include: kubeadm-system-pods.yml
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }})
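
kubeadm join returns as soon as the TLS bootstrap completes, so it can be
worth confirming the node actually registered. A hedged example, not part of
the diff; the retry counts are arbitrary and node names are assumed to match
inventory hostnames:

- name: Verify the joined node registered with the apiserver
  command: "{{ bin_dir }}/kubectl get node {{ inventory_hostname }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  register: node_check
  until: node_check.rc == 0
  retries: 10
  delay: 6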


@@ -0,0 +1,46 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kube-proxy
+  namespace: kube-system
+spec:
+  hostNetwork: true
+  containers:
+  - name: kube-proxy
+    image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}
+    command:
+    - /hyperkube
+    - proxy
+    - --v={{ kube_log_level | default('2') }}
+    - --master={{ kube_apiserver_endpoint }}
+{% if not is_kube_master %}
+    - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
+{% endif %}
+    - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
+    - --cluster-cidr={{ kube_pods_subnet }}
+    - --proxy-mode={{ kube_proxy_mode }}
+{% if kube_proxy_masquerade_all and kube_proxy_mode == "iptables" %}
+    - --masquerade-all
+{% endif %}
+    securityContext:
+      privileged: true
+    volumeMounts:
+    - mountPath: /etc/ssl/certs
+      name: ssl-certs-host
+      readOnly: true
+    - mountPath: /etc/kubernetes/node-kubeconfig.yaml
+      name: "kubeconfig"
+      readOnly: true
+    - mountPath: /etc/kubernetes/ssl
+      name: "etc-kube-ssl"
+      readOnly: true
+  volumes:
+  - name: ssl-certs-host
+    hostPath:
+      path: /usr/share/ca-certificates
+  - name: "kubeconfig"
+    hostPath:
+      path: "/etc/kubernetes/node-kubeconfig.yaml"
+  - name: "etc-kube-ssl"
+    hostPath:
+      path: "/etc/kubernetes/ssl"


@@ -0,0 +1,25 @@
+---
+- name: check for bootstrap
+  command: kubectl get nodes
+  register: kube_bootstrap
+  ignore_errors: true
+
+- name: initialize the kubernetes master
+  command: "kubeadm init --token {{ init_token }} \
+    --api-advertise-addresses {{ kube_apiserver_access_address }} \
+    --service-cidr {{ kube_service_addresses }} \
+    --external-etcd-endpoints {{ etcd_access_addresses }} \
+    --service-dns-domain {{ cluster_name }} \
+    {% if cloud_provider is defined %}--cloud-provider {{ cloud_provider }}{% endif %}"
+  when: "'localhost:8080 was refused' in kube_bootstrap.stderr"
+  register: initout
+  ignore_errors: true
+
+# NOTE(bogdando) we want ansible to control configs/manifests instead of kubeadm, yet
+- name: purge kube manifests created by kubeadm
+  file: path="{{ kube_manifest_dir }}/{{ item }}.json" state=absent
+  with_items: [ "kube-controller-manager", "kube-apiserver", "kube-scheduler" ]
+
+- name: purge kube configs created by kubeadm
+  file: path="{{ kube_config_dir }}/{{ item }}.conf" state=absent
+  with_items: [ "kubelet", "admin" ]
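
The bootstrap check above keys off kubectl's 'localhost:8080 was refused'
error text, which is brittle across releases. A sketch of an alternative
guard, assuming the CA that kubeadm generates under {{ kube_config_dir }}/pki
survives between runs (the purge tasks above remove configs and manifests,
but not the pki dir):

- name: check for an existing kubeadm bootstrap
  stat:
    path: "{{ kube_config_dir }}/pki/ca.pem"
  register: kubeadm_bootstrapped

with the init task then gated on "when: not kubeadm_bootstrapped.stat.exists"
instead of matching the error string.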


@@ -0,0 +1,21 @@
+---
+- set_fact:
+    kubeadm_certs_dir: "{{ kube_config_dir }}/pki"
+
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kubeadm-kube-apiserver.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running
+
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kubeadm-kube-controller-manager.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kubeadm-kube-scheduler.json.j2
+    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
+  notify: Master | wait for kube-scheduler
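
The notify targets above point at handlers defined elsewhere in the master
role. For context, a wait handler along these lines is what gets triggered;
the URL, port, and retry values are assumptions rather than the role's actual
code:

- name: Master | wait for the apiserver to be running
  uri:
    url: "http://localhost:{{ kube_apiserver_insecure_port }}/healthz"
  register: result
  until: result.status == 200
  retries: 20
  delay: 6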


@@ -15,40 +15,13 @@
   delay: "{{ retry_stagger | random + 3 }}"
   changed_when: false

-- name: Write kube-apiserver manifest
-  template:
-    src: manifests/kube-apiserver.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
-  notify: Master | wait for the apiserver to be running
-- meta: flush_handlers
-
-# Create kube-system namespace
-- name: copy 'kube-system' namespace manifest
-  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
-  run_once: yes
-  when: inventory_hostname == groups['kube-master'][0]
-
-- name: Check if kube-system exists
-  command: "{{ bin_dir }}/kubectl get ns kube-system"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  run_once: yes
-
-- name: Create 'kube-system' namespace
-  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
-  changed_when: False
-  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
-
-# Write other manifests
-- name: Write kube-controller-manager manifest
-  template:
-    src: manifests/kube-controller-manager.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
-  notify: Master | wait for kube-controller-manager
-
-- name: Write kube-scheduler manifest
-  template:
-    src: manifests/kube-scheduler.manifest.j2
-    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
-  notify: Master | wait for kube-scheduler
+- include: system-pods.yml
+  when: ({{ not use_kubeadm|bool }})
+
+# TODO(bogdando) kubeadm init/update pods on RedHat/CoreOS
+- include: kube-init.yml
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }}) and (inventory_hostname == groups['kube-master'][0])
+  run_once: true
+
+- include: kubeadm-system-pods.yml
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }})


@@ -0,0 +1,38 @@
+---
+- name: Write kube-apiserver manifest
+  template:
+    src: manifests/kube-apiserver.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-apiserver.manifest"
+  notify: Master | wait for the apiserver to be running
+- meta: flush_handlers
+
+# Create kube-system namespace
+- name: copy 'kube-system' namespace manifest
+  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
+  run_once: yes
+  when: inventory_hostname == groups['kube-master'][0]
+
+- name: Check if kube-system exists
+  command: "{{ bin_dir }}/kubectl get ns kube-system"
+  register: 'kubesystem'
+  changed_when: False
+  failed_when: False
+  run_once: yes
+
+- name: Create 'kube-system' namespace
+  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
+  changed_when: False
+  when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
+
+# Write other manifests
+- name: Write kube-controller-manager manifest
+  template:
+    src: manifests/kube-controller-manager.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-controller-manager.manifest"
+  notify: Master | wait for kube-controller-manager
+
+- name: Write kube-scheduler manifest
+  template:
+    src: manifests/kube-scheduler.manifest.j2
+    dest: "{{ kube_manifest_dir }}/kube-scheduler.manifest"
+  notify: Master | wait for kube-scheduler


@@ -19,10 +19,10 @@ spec:
     - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota
     - --service-cluster-ip-range={{ kube_service_addresses }}
     - --client-ca-file={{ kube_cert_dir }}/ca.pem
-    - --basic-auth-file={{ kube_users_dir }}/known_users.csv
+    - --basic-auth-file={{ kube_users_dir }}/tokens.csv
     - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem
     - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
-    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
+    - --token-auth-file={{ kube_token_dir }}/tokens.csv
     - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
     - --secure-port={{ kube_apiserver_port }}
     - --insecure-port={{ kube_apiserver_insecure_port }}
@@ -42,7 +42,6 @@ spec:
 {% elif cloud_provider is defined and cloud_provider == "aws" %}
     - --cloud-provider={{ cloud_provider }}
 {% endif %}
-    - 2>&1 >> {{ kube_log_dir }}/kube-apiserver.log
     volumeMounts:
     - mountPath: {{ kube_config_dir }}
       name: kubernetes-config
@@ -50,8 +49,6 @@ spec:
     - mountPath: /etc/ssl/certs
      name: ssl-certs-host
      readOnly: true
-    - mountPath: /var/log/
-      name: logfile
  volumes:
  - hostPath:
      path: {{ kube_config_dir }}
@@ -59,6 +56,3 @@ spec:
  - hostPath:
      path: /etc/ssl/certs/
    name: ssl-certs-host
-  - hostPath:
-      path: /var/log/
-    name: logfile


@@ -0,0 +1,98 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "kube-apiserver",
+    "namespace": "kube-system",
+    "creationTimestamp": null,
+    "labels": {
+      "component": "kube-apiserver",
+      "tier": "control-plane"
+    }
+  },
+  "spec": {
+    "volumes": [
+      {
+        "name": "certs",
+        "hostPath": {
+          "path": "/etc/ssl/certs"
+        }
+      },
+      {
+        "name": "pki",
+        "hostPath": {
+          "path": "{{ kube_config_dir }}"
+        }
+      }
+    ],
+    "containers": [
+      {
+        "name": "kube-apiserver",
+        "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}",
+        "command": [
+          "/hyperkube",
+          "apiserver",
+          "--v={{ kube_log_level | default('2') }}",
+          "--advertise-address={{ ip | default(ansible_default_ipv4.address) }}",
+          "--apiserver-count={{ kube_apiserver_count }}",
+          "--insecure-bind-address={{ kube_apiserver_insecure_bind_address }}",
+          "--etcd-servers={{ etcd_access_endpoint }}",
+          "--etcd-quorum-read=true",
+          "--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
+          "--service-cluster-ip-range={{ kube_service_addresses }}",
+          "--service-account-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem",
+          "--client-ca-file={{ kubeadm_certs_dir }}/ca.pem",
+          "--tls-cert-file={{ kubeadm_certs_dir }}/apiserver.pem",
+          "--tls-private-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem",
+          "--token-auth-file={{ kubeadm_certs_dir }}/tokens.csv",
+          "--basic-auth-file={{ kubeadm_certs_dir }}/tokens.csv",
+          "--secure-port={{ kube_apiserver_port }}",
+{% if kube_api_runtime_config is defined %}
+{% for conf in kube_api_runtime_config %}
+          "--runtime-config={{ conf }}",
+{% endfor %}
+{% endif %}
+{% if enable_network_policy is defined and enable_network_policy == True %}
+          "--runtime-config=extensions/v1beta1/networkpolicies=true",
+{% endif %}
+          "--v={{ kube_log_level | default('2') }}",
+          "--allow-privileged=true",
+{% if cloud_provider is defined and cloud_provider == "openstack" %}
+          "--cloud-provider={{ cloud_provider }}",
+          "--cloud-config={{ kube_config_dir }}/cloud_config",
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+          "--cloud-provider={{ cloud_provider }}",
+{% endif %}
+          "--insecure-port={{ kube_apiserver_insecure_port }}"
+        ],
+        "resources": {
+          "requests": {
+            "cpu": "250m"
+          }
+        },
+        "volumeMounts": [
+          {
+            "name": "certs",
+            "mountPath": "/etc/ssl/certs"
+          },
+          {
+            "name": "pki",
+            "readOnly": true,
+            "mountPath": "{{ kube_config_dir }}"
+          }
+        ],
+        "livenessProbe": {
+          "httpGet": {
+            "path": "/healthz",
+            "port": {{ kube_apiserver_insecure_port }},
+            "host": "{{ kube_apiserver_insecure_bind_address }}"
+          },
+          "initialDelaySeconds": 15,
+          "timeoutSeconds": 15
+        }
+      }
+    ],
+    "hostNetwork": true
+  },
+  "status": {}
+}


@@ -0,0 +1,88 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "kube-controller-manager",
+    "namespace": "kube-system",
+    "creationTimestamp": null,
+    "labels": {
+      "component": "kube-controller-manager",
+      "tier": "control-plane"
+    }
+  },
+  "spec": {
+    "volumes": [
+{% if cloud_provider is defined and cloud_provider == "openstack" %}
+      {
+        "name": "cloudconfig",
+        "hostPath": {
+          "path": "{{ kube_config_dir }}/cloud_config"
+        }
+      },
+{% endif %}
+      {
+        "name": "pki",
+        "hostPath": {
+          "path": "{{ kube_config_dir }}"
+        }
+      }
+    ],
+    "containers": [
+      {
+        "name": "kube-controller-manager",
+        "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}",
+        "command": [
+          "/hyperkube",
+          "controller-manager",
+          "--v={{ kube_log_level | default('2') }}",
+          "--address=127.0.0.1",
+          "--leader-elect",
+          "--master={{ kube_apiserver_endpoint }}",
+          "--enable-hostpath-provisioner={{ kube_hostpath_dynamic_provisioner }}",
+          "--cluster-name=kubernetes",
+          "--root-ca-file={{ kubeadm_certs_dir }}/ca.pem",
+          "--service-account-private-key-file={{ kubeadm_certs_dir }}/apiserver-key.pem",
+          "--cluster-signing-cert-file={{ kubeadm_certs_dir }}/ca.pem",
+          "--cluster-signing-key-file={{ kubeadm_certs_dir }}/ca-key.pem",
+{% if cloud_provider is defined and cloud_provider == "openstack" %}
+          "--cloud-provider={{cloud_provider}}",
+          "--cloud-config={{ kube_config_dir }}/cloud_config",
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
+          "--cloud-provider={{cloud_provider}}",
+{% endif %}
+          "--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap"
+        ],
+        "resources": {
+          "requests": {
+            "cpu": "200m"
+          }
+        },
+        "volumeMounts": [
+{% if cloud_provider is defined and cloud_provider == "openstack" %}
+          {
+            "name": "cloudconfig",
+            "readOnly": true,
+            "mountPath": "{{ kube_config_dir }}/cloud_config"
+          },
+{% endif %}
+          {
+            "name": "pki",
+            "readOnly": true,
+            "mountPath": "{{ kube_config_dir }}"
+          }
+        ],
+        "livenessProbe": {
+          "httpGet": {
+            "path": "/healthz",
+            "port": 10252,
+            "host": "127.0.0.1"
+          },
+          "initialDelaySeconds": 15,
+          "timeoutSeconds": 15
+        }
+      }
+    ],
+    "hostNetwork": true
+  },
+  "status": {}
+}


@@ -0,0 +1,45 @@
+{
+  "kind": "Pod",
+  "apiVersion": "v1",
+  "metadata": {
+    "name": "kube-scheduler",
+    "namespace": "kube-system",
+    "creationTimestamp": null,
+    "labels": {
+      "component": "kube-scheduler",
+      "tier": "control-plane"
+    }
+  },
+  "spec": {
+    "containers": [
+      {
+        "name": "kube-scheduler",
+        "image": "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}",
+        "command": [
+          "/hyperkube",
+          "scheduler",
+          "--v={{ kube_log_level | default('2') }}",
+          "--address=127.0.0.1",
+          "--leader-elect",
+          "--master={{ kube_apiserver_endpoint }}"
+        ],
+        "resources": {
+          "requests": {
+            "cpu": "100m"
+          }
+        },
+        "livenessProbe": {
+          "httpGet": {
+            "path": "/healthz",
+            "port": 10251,
+            "host": "127.0.0.1"
+          },
+          "initialDelaySeconds": 15,
+          "timeoutSeconds": 15
+        }
+      }
+    ],
+    "hostNetwork": true
+  },
+  "status": {}
+}


@@ -2,6 +2,9 @@
 dependencies:
   - role: download
     file: "{{ downloads.hyperkube }}"
+  - role: download
+    file: "{{ downloads.kubeadm }}"
   - role: download
     file: "{{ downloads.pod_infra }}"
   - role: kubernetes/secrets
+    when: ({{ not use_kubeadm|bool }})


@@ -17,3 +17,10 @@
 - name: install | Install kubelet launch script
   template: src=kubelet-container.j2 dest="{{ bin_dir }}/kubelet" owner=kube mode=0755 backup=yes
   notify: restart kubelet
+
+# TODO(bogdando) rework to consume the kubeadm from hyperkube.
+# This hack works for the Debian OS family only and complements the downloads role.
+- name: install | Copy kubeadm binary from the download dir
+  command: sh -c "dpkg -x {{local_release_dir}}/kubeadm/deb/kubeadm.deb {{local_release_dir}}/kubeadm &&
+           rsync -piu {{local_release_dir}}/kubeadm/usr/bin/kubeadm {{ bin_dir }}/kubeadm"
+  when: (ansible_os_family in ["Debian"]) and ({{ use_kubeadm|bool }})
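
Unpacking the deb with dpkg -x and rsyncing the binary out avoids touching
the package database. An untested alternative sketch would be to install the
downloaded package properly:

- name: install | Install kubeadm from the downloaded deb (sketch)
  apt:
    deb: "{{ local_release_dir }}/kubeadm/deb/kubeadm.deb"
  when: (ansible_os_family in ["Debian"]) and (use_kubeadm|bool)

That would land the binary in /usr/bin rather than {{ bin_dir }}, though,
which is presumably why the commit copies it by hand.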


@@ -1,4 +1,5 @@
 ---
+# TODO(bogdando) rework for kubeadm overlaps with kubelet setup tasks
 - include: install.yml

 - include: nginx-proxy.yml
@@ -25,6 +26,7 @@
   template:
     src: manifests/kube-proxy.manifest.j2
     dest: "{{ kube_manifest_dir }}/kube-proxy.manifest"
+  when: ({{ not use_kubeadm|bool }})

 # reload-systemd
 - meta: flush_handlers


@@ -15,7 +15,7 @@
 # limitations under the License.

 token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
-token_file="${token_dir}/known_tokens.csv"
+token_file="${token_dir}/tokens.csv"

 create_accounts=($@)


@@ -1,7 +1,7 @@
 ---
 - name: "Check_tokens | check if the tokens have already been generated on first master"
   stat:
-    path: "{{ kube_token_dir }}/known_tokens.csv"
+    path: "{{ kube_token_dir }}/tokens.csv"
   delegate_to: "{{groups['kube-master'][0]}}"
   register: known_tokens_master
   run_once: true