commit
8ee2091955
20 changed files with 85 additions and 21 deletions
|
@ -51,6 +51,18 @@ ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
|
|||
--private-key=~/.ssh/private_key
|
||||
```
|
||||
|
||||
Remove nodes
|
||||
------------
|
||||
|
||||
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all specified nodes will be drained, then some Kubernetes services will be stopped and some certificates deleted, and finally the kubectl command will be executed to delete these nodes. This can be combined with the add-node function; it is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
|
||||
|
||||
- Add the worker nodes you want to remove to the list under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||
- Run the ansible-playbook command, substituting `remove-node.yml`:
|
||||
```
|
||||
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
|
||||
--private-key=~/.ssh/private_key
|
||||
```
|
||||
|
||||
Connecting to Kubernetes
|
||||
------------------------
|
||||
By default, Kubespray configures kube-master hosts with insecure access to
|
||||
|
|
|
@ -76,6 +76,7 @@ bin_dir: /usr/local/bin
|
|||
#azure_subnet_name:
|
||||
#azure_security_group_name:
|
||||
#azure_vnet_name:
|
||||
#azure_vnet_resource_group:
|
||||
#azure_route_table_name:
|
||||
|
||||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
|
|
|
@ -111,7 +111,10 @@ kube_apiserver_insecure_port: 8080 # (http)
|
|||
|
||||
# Kube-proxy proxyMode configuration.
|
||||
# Can be ipvs, iptables
|
||||
kube_proxy_mode: iptables
|
||||
kube_proxy_mode: iptables
|
||||
|
||||
## Encrypting Secret Data at Rest (experimental)
|
||||
kube_encrypt_secret_data: false
|
||||
|
||||
# DNS configuration.
|
||||
# Kubernetes cluster name, also will be used as DNS domain
|
||||
|
|
|
@ -21,16 +21,16 @@ docker_dns_servers_strict: yes
|
|||
|
||||
docker_container_storage_setup: false
|
||||
|
||||
#CentOS/RedHat docker-ce repo
|
||||
# CentOS/RedHat docker-ce repo
|
||||
docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
|
||||
docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
|
||||
#Ubuntu docker-ce repo
|
||||
# Ubuntu docker-ce repo
|
||||
docker_ubuntu_repo_base_url: "https://download.docker.com/linux/ubuntu"
|
||||
docker_ubuntu_repo_gpgkey: 'https://download.docker.com/linux/ubuntu/gpg'
|
||||
#Debian docker-ce repo
|
||||
# Debian docker-ce repo
|
||||
docker_debian_repo_base_url: "https://download.docker.com/linux/debian"
|
||||
docker_debian_repo_gpgkey: 'https://download.docker.com/linux/debian/gpg'
|
||||
#dockerproject repo
|
||||
# dockerproject repo
|
||||
dockerproject_rh_repo_base_url: 'https://yum.dockerproject.org/repo/main/centos/7'
|
||||
dockerproject_rh_repo_gpgkey: 'https://yum.dockerproject.org/gpg'
|
||||
dockerproject_apt_repo_base_url: 'https://apt.dockerproject.org/repo'
|
||||
|
|
|
@ -7,4 +7,4 @@ metadata:
|
|||
labels:
|
||||
k8s-app: ingress-nginx
|
||||
data:
|
||||
{{ ingress_nginx_configmap | to_nice_yaml }}
|
||||
{{ ingress_nginx_configmap | to_nice_yaml | indent(2) }}
|
||||
|
|
|
@ -7,4 +7,4 @@ metadata:
|
|||
labels:
|
||||
k8s-app: ingress-nginx
|
||||
data:
|
||||
{{ ingress_nginx_configmap_tcp_services | to_nice_yaml }}
|
||||
{{ ingress_nginx_configmap_tcp_services | to_nice_yaml | indent(2) }}
|
||||
|
|
|
@ -7,4 +7,4 @@ metadata:
|
|||
labels:
|
||||
k8s-app: ingress-nginx
|
||||
data:
|
||||
{{ ingress_nginx_configmap_udp_services | to_nice_yaml }}
|
||||
{{ ingress_nginx_configmap_udp_services | to_nice_yaml | indent(2) }}
|
||||
|
|
|
@ -92,3 +92,8 @@ kube_kubeadm_scheduler_extra_args: {}
|
|||
|
||||
## Variable for influencing kube-scheduler behaviour
|
||||
volume_cross_zone_attachment: false
|
||||
|
||||
## Encrypting Secret Data at Rest
|
||||
kube_encrypt_secret_data: false
|
||||
kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token length=32 chars=ascii_letters,digits') }}"
|
||||
kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm
|
||||
|
|
10
roles/kubernetes/master/tasks/encrypt-at-rest.yml
Normal file
10
roles/kubernetes/master/tasks/encrypt-at-rest.yml
Normal file
|
@ -0,0 +1,10 @@
|
|||
---
|
||||
- name: Write secrets for encrypting secret data at rest
|
||||
template:
|
||||
src: secrets_encryption.yaml.j2
|
||||
dest: "{{ kube_config_dir }}/ssl/secrets_encryption.yaml"
|
||||
owner: root
|
||||
group: "{{ kube_cert_group }}"
|
||||
mode: 0640
|
||||
tags:
|
||||
- kube-apiserver
|
|
@ -12,6 +12,9 @@
|
|||
- import_tasks: users-file.yml
|
||||
when: kube_basic_auth|default(true)
|
||||
|
||||
- import_tasks: encrypt-at-rest.yml
|
||||
when: kube_encrypt_secret_data
|
||||
|
||||
- name: Compare host kubectl with hyperkube container
|
||||
command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/systembindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/cmp /hyperkube /systembindir/kubectl"
|
||||
register: kubectl_task_compare_result
|
||||
|
|
|
@ -37,6 +37,7 @@ apiServerExtraArgs:
|
|||
admission-control: {{ kube_apiserver_admission_control | join(',') }}
|
||||
apiserver-count: "{{ kube_apiserver_count }}"
|
||||
service-node-port-range: {{ kube_apiserver_node_port_range }}
|
||||
kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
|
||||
{% if kube_basic_auth|default(true) %}
|
||||
basic-auth-file: {{ kube_users_dir }}/known_users.csv
|
||||
{% endif %}
|
||||
|
@ -52,6 +53,9 @@ apiServerExtraArgs:
|
|||
{% if kube_oidc_groups_claim is defined %}
|
||||
oidc-groups-claim: {{ kube_oidc_groups_claim }}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% if kube_encrypt_secret_data %}
|
||||
experimental-encryption-provider-config: {{ kube_config_dir }}/ssl/secrets_encryption.yaml
|
||||
{% endif %}
|
||||
storage-backend: {{ kube_apiserver_storage_backend }}
|
||||
{% if kube_api_runtime_config is defined %}
|
||||
|
@ -59,7 +63,7 @@ apiServerExtraArgs:
|
|||
{% endif %}
|
||||
allow-privileged: "true"
|
||||
{% for key in kube_kubeadm_apiserver_extra_args %}
|
||||
{{ key }}: {{ kube_kubeadm_apiserver_extra_args[key] }}
|
||||
{{ key }}: "{{ kube_kubeadm_apiserver_extra_args[key] }}"
|
||||
{% endfor %}
|
||||
controllerManagerExtraArgs:
|
||||
node-monitor-grace-period: {{ kube_controller_node_monitor_grace_period }}
|
||||
|
@ -69,12 +73,12 @@ controllerManagerExtraArgs:
|
|||
feature-gates: {{ kube_feature_gates|join(',') }}
|
||||
{% endif %}
|
||||
{% for key in kube_kubeadm_controller_extra_args %}
|
||||
{{ key }}: {{ kube_kubeadm_controller_extra_args[key] }}
|
||||
{{ key }}: "{{ kube_kubeadm_controller_extra_args[key] }}"
|
||||
{% endfor %}
|
||||
{% if kube_kubeadm_scheduler_extra_args|length > 0 %}
|
||||
schedulerExtraArgs:
|
||||
{% for key in kube_kubeadm_scheduler_extra_args %}
|
||||
{{ key }}: {{ kube_kubeadm_scheduler_extra_args[key] }}
|
||||
{{ key }}: "{{ kube_kubeadm_scheduler_extra_args[key] }}"
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
apiServerCertSANs:
|
||||
|
|
|
@ -103,6 +103,9 @@ spec:
|
|||
{% if authorization_modes %}
|
||||
- --authorization-mode={{ authorization_modes|join(',') }}
|
||||
{% endif %}
|
||||
{% if kube_encrypt_secret_data %}
|
||||
- --experimental-encryption-provider-config={{ kube_config_dir }}/ssl/secrets_encryption.yaml
|
||||
{% endif %}
|
||||
{% if kube_feature_gates %}
|
||||
- --feature-gates={{ kube_feature_gates|join(',') }}
|
||||
{% endif %}
|
||||
|
|
11
roles/kubernetes/master/templates/secrets_encryption.yaml.j2
Normal file
11
roles/kubernetes/master/templates/secrets_encryption.yaml.j2
Normal file
|
@ -0,0 +1,11 @@
|
|||
kind: EncryptionConfig
|
||||
apiVersion: v1
|
||||
resources:
|
||||
- resources:
|
||||
- secrets
|
||||
providers:
|
||||
- {{ kube_encryption_algorithm }}:
|
||||
keys:
|
||||
- name: key
|
||||
secret: {{ kube_encrypt_token | b64encode }}
|
||||
- identity: {}
|
|
@ -5,8 +5,8 @@
|
|||
--privileged \
|
||||
--name=kubelet \
|
||||
--restart=on-failure:5 \
|
||||
--memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
|
||||
--cpu-shares={{ kubelet_cpu_limit|regex_replace('m', '') }} \
|
||||
--memory={{ kube_memory_reserved|regex_replace('Mi', 'M') }} \
|
||||
--cpu-shares={{ kube_cpu_reserved|regex_replace('m', '') }} \
|
||||
-v /dev:/dev:rw \
|
||||
-v /etc/cni:/etc/cni:ro \
|
||||
-v /opt/cni:/opt/cni:ro \
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
|
||||
### Upstream source https://github.com/kubernetes/release/blob/master/debian/xenial/kubeadm/channel/stable/etc/systemd/system/kubelet.service.d/
|
||||
### All upstream values should be present in this file
|
||||
|
||||
# logging to stderr means we get it in the systemd journal
|
||||
|
@ -23,13 +23,14 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
|
|||
{% if kubelet_authentication_token_webhook %}
|
||||
--authentication-token-webhook \
|
||||
{% endif %}
|
||||
{% if kubelet_authorization_mode_webhook %}
|
||||
--authorization-mode=Webhook \
|
||||
{% endif %}
|
||||
--client-ca-file={{ kube_cert_dir }}/ca.crt \
|
||||
--pod-manifest-path={{ kube_manifest_dir }} \
|
||||
--cadvisor-port={{ kube_cadvisor_port }} \
|
||||
{# end kubeadm specific settings #}
|
||||
--pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }} \
|
||||
--kube-reserved cpu={{ kubelet_cpu_limit }},memory={{ kubelet_memory_limit|regex_replace('Mi', 'M') }} \
|
||||
--node-status-update-frequency={{ kubelet_status_update_frequency }} \
|
||||
--cgroup-driver={{ kubelet_cgroup_driver|default(kubelet_cgroup_driver_detected) }} \
|
||||
--docker-disable-shared-pid={{ kubelet_disable_shared_pid }} \
|
||||
|
|
|
@ -44,6 +44,11 @@
|
|||
msg: "azure_vnet_name is missing"
|
||||
when: azure_vnet_name is not defined or azure_vnet_name == ""
|
||||
|
||||
- name: check azure_vnet_resource_group value
|
||||
fail:
|
||||
msg: "azure_vnet_resource_group is missing"
|
||||
when: azure_vnet_resource_group is not defined or azure_vnet_resource_group == ""
|
||||
|
||||
- name: check azure_route_table_name value
|
||||
fail:
|
||||
msg: "azure_route_table_name is missing"
|
||||
|
|
|
@ -8,5 +8,6 @@
|
|||
"subnetName": "{{ azure_subnet_name }}",
|
||||
"securityGroupName": "{{ azure_security_group_name }}",
|
||||
"vnetName": "{{ azure_vnet_name }}",
|
||||
"vnetResourceGroup": "{{ azure_vnet_resource_group }}",
|
||||
"routeTableName": "{{ azure_route_table_name }}"
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,12 +2,11 @@
|
|||
- import_tasks: seed.yml
|
||||
when: weave_mode_seed
|
||||
|
||||
|
||||
- name: template weavenet conflist
|
||||
template:
|
||||
src: weavenet.conflist.j2
|
||||
dest: /etc/cni/net.d/00-weave.conflist
|
||||
owner: kube
|
||||
src: weavenet.conflist.j2
|
||||
dest: /etc/cni/net.d/00-weave.conflist
|
||||
owner: kube
|
||||
|
||||
- name: Weave | Copy cni plugins from hyperkube
|
||||
command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/cp -r /opt/cni/bin/. /cnibindir/"
|
||||
|
|
|
@ -114,7 +114,12 @@
|
|||
with_items: "{{logs}}"
|
||||
|
||||
- name: Pack results and logs
|
||||
local_action: raw GZIP=-9 tar --remove-files -cvzf {{dir|default(".")}}/logs.tar.gz -C /tmp collect-info
|
||||
archive:
|
||||
path: "/tmp/collect-info"
|
||||
dest: "{{ dir|default('.') }}/logs.tar.gz"
|
||||
remove: true
|
||||
delegate_to: localhost
|
||||
become: false
|
||||
run_once: true
|
||||
|
||||
- name: Clean up collected command outputs
|
||||
|
|
|
@ -15,3 +15,4 @@ etcd_deployment_type: host
|
|||
deploy_netchecker: true
|
||||
kubedns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
kube_encrypt_secret_data: true
|
||||
|
|
Loading…
Reference in a new issue