Replace kube-master with kube_control_plane (#7256)

This replaces kube-master with kube_control_plane because of [1]:

  The Kubernetes project is moving away from wording that is
  considered offensive. A new working group WG Naming was created
  to track this work, and the word "master" was declared as offensive.
  A proposal was formalized for replacing the word "master" with
  "control plane". This means it should be removed from source code,
  documentation, and user-facing configuration from Kubernetes and
  its sub-projects.

NOTE: The new name is kube_control_plane rather than kube-control-plane
      because the underscore form is a valid Ansible group name.

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
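
For reference, a minimal inventory sketch (illustrative only; the host names
are hypothetical) showing the renamed group. Only the master group is renamed
here; kube-node, etcd and k8s-cluster keep their existing names, and the
underscore form keeps the group name valid for Ansible, as noted above:

  [kube_control_plane]
  node1

  [kube-node]
  node2

  [etcd]
  node1

  [k8s-cluster:children]
  kube_control_plane
  kube-node

Inventories that still define [kube-master] keep working: the updated
playbooks include a group_by task that adds those hosts to the
kube_control_plane group at runtime.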
Author: Kenichi Omichi, 2021-03-23 17:26:05 -07:00 (committed by GitHub)
Parent: d53fd29e34
Commit: 486b223e01
159 changed files with 564 additions and 485 deletions


@ -30,7 +30,7 @@ variables:
MITOGEN_ENABLE: "false"
ANSIBLE_LOG_LEVEL: "-vv"
RECOVER_CONTROL_PLANE_TEST: "false"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
before_script:
- ./tests/scripts/rebase.sh


@ -223,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"
packet_ubuntu18-calico-ha-recover-noquorum:
stage: deploy-part3
@ -231,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
when: on_success
variables:
RECOVER_CONTROL_PLANE_TEST: "true"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"

Vagrantfile

@ -253,9 +253,9 @@ Vagrant.configure("2") do |config|
#ansible.tags = ['download']
ansible.groups = {
"etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
"kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
"kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
"k8s-cluster:children" => ["kube-master", "kube-node"],
"k8s-cluster:children" => ["kube_control_plane", "kube-node"],
}
end
end


@ -2,6 +2,15 @@
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Add kube-master nodes to kube_control_plane
# This is for old inventory which contains kube-master instead of kube_control_plane
hosts: kube-master
gather_facts: false
tasks:
- name: add nodes to kube_control_plane group
group_by:
key: 'kube_control_plane'
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
@ -66,7 +75,7 @@
- { role: kubespray-defaults }
- { role: kubernetes/node, tags: node }
- hosts: kube-master
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@ -94,7 +103,7 @@
- { role: kubespray-defaults }
- { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@ -102,7 +111,7 @@
- { role: kubespray-defaults }
- { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }
- hosts: kube-master
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"
@ -114,7 +123,7 @@
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: kube-master
- hosts: kube_control_plane
gather_facts: False
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
environment: "{{ proxy_disable_env }}"


@ -35,7 +35,7 @@ class SearchEC2Tags(object):
hosts['_meta'] = { 'hostvars': {} }
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
for group in ["kube-master", "kube-node", "etcd"]:
for group in ["kube_control_plane", "kube-node", "etcd"]:
hosts[group] = []
tag_key = "kubespray-role"
tag_value = ["*"+group+"*"]
@ -70,7 +70,7 @@ class SearchEC2Tags(object):
hosts[group].append(dns_name)
hosts['_meta']['hostvars'][dns_name] = ansible_host
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
print(json.dumps(hosts, sort_keys=True, indent=2))
SearchEC2Tags()


@ -7,9 +7,9 @@
{% endif %}
{% endfor %}
[kube-master]
[kube_control_plane]
{% for vm in vm_list %}
{% if 'kube-master' in vm.tags.roles %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -30,4 +30,4 @@
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane


@ -7,9 +7,9 @@
{% endif %}
{% endfor %}
[kube-master]
[kube_control_plane]
{% for vm in vm_roles_list %}
{% if 'kube-master' in vm.tags.roles %}
{% if 'kube_control_plane' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}
@ -30,5 +30,5 @@
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane


@ -144,7 +144,7 @@
"[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
],
"tags": {
"roles": "kube-master,etcd"
"roles": "kube_control_plane,etcd"
},
"apiVersion": "{{apiVersion}}",
"properties": {


@ -44,7 +44,7 @@ import re
import subprocess
import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',
@ -299,21 +299,23 @@ class KubesprayInventory(object):
def set_kube_control_plane(self, hosts):
for host in hosts:
self.add_host_to_group('kube-master', host)
self.add_host_to_group('kube_control_plane', host)
def set_all(self, hosts):
for host, opts in hosts.items():
self.add_host_to_group('all', host, opts)
def set_k8s_cluster(self):
k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
k8s_cluster = {'children': {'kube_control_plane': None,
'kube-node': None}}
self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster
def set_calico_rr(self, hosts):
for host in hosts:
if host in self.yaml_config['all']['children']['kube-master']:
if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa
self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube-master group".format(host))
"conflicts with kube_control_plane "
"group".format(host))
continue
if host in self.yaml_config['all']['children']['kube-node']:
self.debug("Not adding {0} to calico-rr group because it "
@ -330,10 +332,10 @@ class KubesprayInventory(object):
"group.".format(host))
continue
if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa
if host in self.yaml_config['all']['children']['kube-master']['hosts']: # noqa
if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa
self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in kube-master "
"group.".format(host))
"scale deployment and host is in "
"kube_control_plane group.".format(host))
continue
self.add_host_to_group('kube-node', host)


@ -223,7 +223,7 @@ class TestInventory(unittest.TestCase):
None)
def test_set_kube_control_plane(self):
group = 'kube-master'
group = 'kube_control_plane'
host = 'node1'
self.inv.set_kube_control_plane([host])
@ -242,7 +242,7 @@ class TestInventory(unittest.TestCase):
def test_set_k8s_cluster(self):
group = 'k8s-cluster'
expected_hosts = ['kube-node', 'kube-master']
expected_hosts = ['kube-node', 'kube_control_plane']
self.inv.set_k8s_cluster()
for host in expected_hosts:


@ -19,6 +19,6 @@
roles:
- { role: glusterfs/client }
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
roles:
- { role: kubernetes-pv }


@ -14,7 +14,7 @@
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9
# [kube-master]
# [kube_control_plane]
# node1
# node2
@ -32,7 +32,7 @@
# [k8s-cluster:children]
# kube-node
# kube-master
# kube_control_plane
# [gfs-cluster]
# gfs_node1


@ -8,7 +8,7 @@
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
register: gluster_pv
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
- name: Kubernetes Apps | Set GlusterFS endpoint and PV
kube:
@ -19,4 +19,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
state: "{{ item.changed | ternary('latest','present') }}"
with_items: "{{ gluster_pv.results }}"
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined


@ -1,5 +1,5 @@
---
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
roles:
- { role: tear-down }


@ -3,7 +3,7 @@
roles:
- { role: prepare }
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
tags:
- "provision"
roles:


@ -7,7 +7,7 @@ all:
vars:
kubelet_fail_swap_on: false
children:
kube-master:
kube_control_plane:
hosts:
node1:
etcd:


@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n
```commandline
# Get the controller's IP address.
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)
# Get the hostname of the load balancer.


@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {
vpc_security_group_ids = module.aws-vpc.aws_security_group
iam_instance_profile = module.aws-iam.kube-master-profile
iam_instance_profile = module.aws-iam.kube_control_plane-profile
key_name = var.AWS_SSH_KEY_NAME
tags = merge(var.default_tags, map(


@ -1,6 +1,6 @@
#Add AWS Roles for Kubernetes
resource "aws_iam_role" "kube-master" {
resource "aws_iam_role" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF
@ -40,9 +40,9 @@ EOF
#Add AWS Policies for Kubernetes
resource "aws_iam_role_policy" "kube-master" {
resource "aws_iam_role_policy" "kube_control_plane" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = aws_iam_role.kube-master.id
role = aws_iam_role.kube_control_plane.id
policy = <<EOF
{
@ -130,9 +130,9 @@ EOF
#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube-master" {
resource "aws_iam_instance_profile" "kube_control_plane" {
name = "kube_${var.aws_cluster_name}_master_profile"
role = aws_iam_role.kube-master.name
role = aws_iam_role.kube_control_plane.name
}
resource "aws_iam_instance_profile" "kube-worker" {


@ -1,5 +1,5 @@
output "kube-master-profile" {
value = aws_iam_instance_profile.kube-master.name
output "kube_control_plane-profile" {
value = aws_iam_instance_profile.kube_control_plane.name
}
output "kube-worker-profile" {


@ -7,7 +7,7 @@ ${public_ip_address_bastion}
[bastion]
${public_ip_address_bastion}
[kube-master]
[kube_control_plane]
${list_master}
@ -21,7 +21,7 @@ ${list_etcd}
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
[k8s-cluster:vars]


@ -2,10 +2,10 @@
${connection_strings_master}
${connection_strings_worker}
[kube-master]
[kube_control_plane]
${list_master}
[kube-master:vars]
[kube_control_plane:vars]
supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]
[etcd]
@ -15,5 +15,5 @@ ${list_master}
${list_worker}
[k8s-cluster:children]
kube-master
kube_control_plane
kube-node


@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
done
echo ""
echo "[kube-master]"
echo "[kube_control_plane]"
for name in "${MASTER_NAMES[@]}"; do
echo "${name}"
done
echo ""
echo "[kube-master:vars]"
echo "[kube_control_plane:vars]"
echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
echo ""
echo "[etcd]"
@ -72,5 +72,5 @@ done
echo ""
echo "[k8s-cluster:children]"
echo "kube-master"
echo "kube_control_plane"
echo "kube-node"


@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = var.network_id
use_access_ip = var.use_access_ip
}
@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
depends_on = var.network_id
use_access_ip = var.use_access_ip
}
@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
}
@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
metadata = {
ssh_user = var.ssh_user
kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
depends_on = var.network_id
use_access_ip = var.use_access_ip
}


@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.packet_project_id
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
}
resource "packet_device" "k8s_master_no_etcd" {
@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
operating_system = var.operating_system
billing_cycle = var.billing_cycle
project_id = var.packet_project_id
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
}
resource "packet_device" "k8s_etcd" {


@ -3,7 +3,7 @@
${connection_strings_master}
${connection_strings_worker}
[kube-master]
[kube_control_plane]
${list_master}
[etcd]
@ -13,5 +13,5 @@ ${list_master}
${list_worker}
[k8s-cluster:children]
kube-master
kube_control_plane
kube-node


@ -3,7 +3,7 @@
${connection_strings_master}
${connection_strings_worker}
[kube-master]
[kube_control_plane]
${list_master}
[etcd]
@ -13,5 +13,5 @@ ${list_master}
${list_worker}
[k8s-cluster:children]
kube-master
kube_control_plane
kube-node


@ -1,30 +1,30 @@
---
- import_tasks: sync_kube_master_certs.yml
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
- import_tasks: sync_kube_node_certs.yml
when: inventory_hostname in groups['k8s-cluster']
# Issue admin certs to kube-master hosts
# Issue admin certs to kube_control_plane hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "admin"
issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
issue_cert_path: "{{ item }}"
issue_cert_role: kube-master
issue_cert_role: kube_control_plane
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
with_items: "{{ kube_admin_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
- name: gen_certs_vault | Set fact about certificate alt names
set_fact:
kube_cert_alt_names: >-
{{
groups['kube-master'] +
groups['kube_control_plane'] +
['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
['localhost']
}}
@ -36,18 +36,18 @@
when: loadbalancer_apiserver is defined
run_once: true
# Issue master components certs to kube-master hosts
# Issue master components certs to kube_control_plane hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "kubernetes"
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
issue_cert_run_once: true
issue_cert_ip_sans: >-
[
{%- for host in groups['kube-master'] -%}
{%- for host in groups['kube_control_plane'] -%}
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
{%- if hostvars[host]['ip'] is defined -%}
"{{ hostvars[host]['ip'] }}",
@ -61,11 +61,11 @@
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
]
issue_cert_path: "{{ item }}"
issue_cert_role: kube-master
issue_cert_role: kube_control_plane
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
with_items: "{{ kube_master_components_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
notify: set secret_changed
# Issue node certs to k8s-cluster nodes
@ -100,7 +100,7 @@
with_items: "{{ kube_proxy_certs_needed|d([]) }}"
when: inventory_hostname in groups['k8s-cluster']
# Issue front proxy cert to kube-master hosts
# Issue front proxy cert to kube_control_plane hosts
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
vars:
issue_cert_common_name: "front-proxy-client"
@ -109,10 +109,10 @@
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
issue_cert_file_group: "{{ kube_cert_group }}"
issue_cert_file_owner: kube
issue_cert_hosts: "{{ groups['kube-master'] }}"
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
issue_cert_ip_sans: >-
[
{%- for host in groups['kube-master'] -%}
{%- for host in groups['kube_control_plane'] -%}
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
{%- if hostvars[host]['ip'] is defined -%}
"{{ hostvars[host]['ip'] }}",
@ -130,5 +130,5 @@
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
when: inventory_hostname in groups['kube-master']
when: inventory_hostname in groups['kube_control_plane']
notify: set secret_changed


@ -29,7 +29,7 @@
sync_file: "{{ item }}"
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
sync_file_is_cert: true
sync_file_owner: kube
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
@ -49,7 +49,7 @@
sync_file: front-proxy-ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
sync_file_owner: kube
- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
@ -61,7 +61,7 @@
sync_file: "{{ item }}"
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
sync_file_is_cert: true
sync_file_owner: kube
with_items: ["front-proxy-client.pem"]
@ -81,7 +81,7 @@
sync_file: ca.pem
sync_file_dir: "{{ kube_cert_dir }}"
sync_file_group: "{{ kube_cert_group }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
sync_file_owner: kube
- name: sync_kube_master_certs | Unset sync_file_results after ca.pem


@ -14,7 +14,7 @@
sync_file_owner: kube
with_items: "{{ kube_node_cert_list|default([]) }}"
- name: sync_kube_node_certs | Set facts for kube-master sync_file results
- name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
set_fact:
kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
with_items: "{{ sync_file_results|d([]) }}"


@ -166,16 +166,16 @@ vault_pki_mounts:
description: "Kubernetes Root CA"
cert_dir: "{{ kube_cert_dir }}"
roles:
- name: kube-master
group: kube-master
password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
- name: kube_control_plane
group: kube_control_plane
password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
policy_rules: default
role_options:
allow_any_name: true
enforce_hostnames: false
organization: "system:masters"
- name: front-proxy-client
group: kube-master
group: kube_control_plane
password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
policy_rules: default
role_options:


@ -51,7 +51,7 @@
gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.vault }}"
gen_ca_copy_group: "kube-master"
gen_ca_copy_group: "kube_control_plane"
when: >-
inventory_hostname in groups.vault
and not vault_cluster_is_initialized


@ -21,7 +21,7 @@
vars:
sync_file: "ca.pem"
sync_file_dir: "{{ vault_cert_dir }}"
sync_file_hosts: "{{ groups['kube-master'] }}"
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
sync_file_owner: vault
sync_file_group: root
sync_file_is_cert: false


@ -35,7 +35,7 @@
gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
gen_ca_vault_headers: "{{ vault_headers }}"
gen_ca_vault_options: "{{ vault_ca_options.kube }}"
gen_ca_copy_group: "kube-master"
gen_ca_copy_group: "kube_control_plane"
when: inventory_hostname in groups.vault
- include_tasks: ../shared/auth_backend.yml


@ -5,7 +5,7 @@
The inventory is composed of 3 groups:
* **kube-node** : list of kubernetes nodes where the pods will run.
* **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
* **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.
Note: do not modify the children of _k8s-cluster_, like putting
@ -18,9 +18,9 @@ k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd
When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
If you want it a standalone, make sure those groups do not intersect.
If you want the server to act both as master and node, the server must be defined
on both groups _kube-master_ and _kube-node_. If you want a standalone and
unschedulable master, the server must be defined only in the _kube-master_ and
If you want the server to act both as control-plane and node, the server must be defined
on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and
unschedulable master, the server must be defined only in the _kube_control_plane_ and
not _kube-node_.
There are also two special groups:
@ -40,7 +40,7 @@ node4 ansible_host=95.54.0.15 ip=10.3.0.4
node5 ansible_host=95.54.0.16 ip=10.3.0.5
node6 ansible_host=95.54.0.17 ip=10.3.0.6
[kube-master]
[kube_control_plane]
node1
node2
@ -58,7 +58,7 @@ node6
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
```
## Group vars and overriding variables precedence


@ -35,11 +35,11 @@ This will produce an inventory that is passed into Ansible that looks like the f
],
"k8s-cluster": {
"children": [
"kube-master",
"kube_control_plane",
"kube-node"
]
},
"kube-master": [
"kube_control_plane": [
"ip-172-31-3-xxx.us-east-2.compute.internal"
],
"kube-node": [
@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f
Guide:
- Create instances in AWS as needed.
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd`
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
- Set the following AWS credentials and info as environment variables in your terminal:


@ -122,7 +122,7 @@ recommended here:
You need to edit your inventory and add:
* `calico-rr` group with nodes in it. `calico-rr` can be combined with
`kube-node` and/or `kube-master`. `calico-rr` group also must be a child
`kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child
group of `k8s-cluster` group.
* `cluster_id` by route reflector node/group (see details
[here](https://hub.docker.com/r/calico/routereflector/))
@ -138,7 +138,7 @@ node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13
node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14
node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15
[kube-master]
[kube_control_plane]
node2
node3
@ -155,7 +155,7 @@ node5
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
calico-rr
[calico-rr]


@ -8,7 +8,7 @@ Kubespray supports several download/upload modes. The default is:
There is also a "pull once, push many" mode as well:
* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
* Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`.
* Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker.
NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.


@ -76,16 +76,16 @@ var in inventory.
## Connecting to Kubernetes
By default, Kubespray configures kube-master hosts with insecure access to
By default, Kubespray configures kube_control_plane hosts with insecure access to
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
generated will point to localhost (on kube-masters) and kube-node hosts will
generated will point to localhost (on kube_control_planes) and kube-node hosts will
connect either to a localhost nginx proxy or to a loadbalancer if configured.
More details on this process are in the [HA guide](/docs/ha-mode.md).
Kubespray permits connecting to the cluster remotely on any IP of any
kube-master host on port 6443 by default. However, this requires
authentication. One can get a kubeconfig from kube-master hosts
kube_control_plane host on port 6443 by default. However, this requires
authentication. One can get a kubeconfig from kube_control_plane hosts
(see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts).
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
@ -119,7 +119,7 @@ kubectl proxy
## Accessing Kubernetes API
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane
host and can optionally be configured on your ansible host by setting
`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:


@ -32,7 +32,7 @@ If you choose to NOT use the local internal loadbalancer, you will need to
configure your own loadbalancer to achieve HA. Note that deploying a
loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
By default, it only configures a non-HA endpoint, which points to the
`access_ip` or IP address of the first server node in the `kube-master` group.
`access_ip` or IP address of the first server node in the `kube_control_plane` group.
It can also configure clients to use endpoints for a given loadbalancer type.
The following diagram shows how traffic to the apiserver is directed.
@ -102,16 +102,16 @@ exclusive to `loadbalancer_apiserver_localhost`.
Access API endpoints are evaluated automatically, as the following:
| Endpoint type | kube-master | non-master | external |
|------------------------------|------------------|-------------------------|-----------------------|
| Local LB (default) | `https://bip:sp` | `https://lc:nsp` | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp` | `https://ext` |
| External LB, no internal | `https://bip:sp` | `<https://lb:lp>` | `https://lb:lp` |
| No ext/int LB | `https://bip:sp` | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
| Endpoint type | kube_control_plane | non-master | external |
|------------------------------|--------------------|-------------------------|-----------------------|
| Local LB (default) | `https://bip:sp` | `https://lc:nsp` | `https://m[0].aip:sp` |
| Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp` | `https://ext` |
| External LB, no internal | `https://bip:sp` | `<https://lb:lp>` | `https://lb:lp` |
| No ext/int LB | `https://bip:sp` | `<https://m[0].aip:sp>` | `https://m[0].aip:sp` |
Where:
* `m[0]` - the first node in the `kube-master` group;
* `m[0]` - the first node in the `kube_control_plane` group;
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
* `lc` - localhost;


@ -62,7 +62,7 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
kubemaster
kubemaster-ha
[kube-master:children]
[kube_control_plane:children]
kubemaster
kubemaster-ha


@ -39,7 +39,7 @@ For a large scaled deployments, consider the following configuration changes:
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
from host/network interruption much quicker with calico-rr. Note that
calico-rr role must be on a host without kube-master or kube-node role (but
calico-rr role must be on a host without kube_control_plane or kube-node role (but
etcd role is okay).
* Check out the


@ -2,9 +2,9 @@
Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
## Limitation: Removal of first kube-master and etcd-master
## Limitation: Removal of first kube_control_plane and etcd-master
Currently you can't remove the first node in your kube-master and etcd-master list. If you still want to remove this node you have to:
Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to:
### 1) Change order of current masters
@ -12,7 +12,7 @@ Modify the order of your master list by pushing your first entry to any other po
```yaml
children:
kube-master:
kube_control_plane:
hosts:
node-1:
node-2:
@ -33,7 +33,7 @@ change your inventory to:
```yaml
children:
kube-master:
kube_control_plane:
hosts:
node-2:
node-3:
@ -103,10 +103,10 @@ You need to make sure there are always an odd number of etcd nodes in the cluste
### 1) Add the new node running cluster.yml
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`.
If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove him first using `remove-node.yml`.
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
At this point, you will have an even number of nodes.
Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.


@ -5,8 +5,8 @@ To recover from broken nodes in the control plane use the "recover\-control\-pla
* Backup what you can
* Provision new nodes to replace the broken ones
* Place the surviving nodes of the control plane first in the "etcd" and "kube-master" groups
* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube-master" groups
* Place the surviving nodes of the control plane first in the "etcd" and "kube\_control\_plane" groups
* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube\_control\_plane" groups
Examples of what broken means in this context:
@ -20,9 +20,9 @@ __Note that you need at least one functional node to be able to recover using th
## Runbook
* Move any broken etcd nodes into the "broken\_etcd" group, make sure the "etcd\_member\_name" variable is set.
* Move any broken master nodes into the "broken\_kube-master" group.
* Move any broken master nodes into the "broken\_kube\_control\_plane" group.
Then run the playbook with ```--limit etcd,kube-master``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
Then run the playbook with ```--limit etcd,kube_control_plane``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
When finished you should have a fully working control plane again.


@ -3,10 +3,10 @@
There are four node layout types: `default`, `separate`, `ha`, and `scale`.
`default` is a non-HA two nodes setup with one separate `kube-node`
and the `etcd` group merged with the `kube-master`.
and the `etcd` group merged with the `kube_control_plane`.
`separate` layout is when there is only node of each type, which includes
a kube-master, kube-node, and etcd cluster member.
a kube_control_plane, kube-node, and etcd cluster member.
`ha` layout consists of two etcd nodes, two masters and a single worker node,
with role intersection.


@ -41,7 +41,7 @@ The var ```-e upgrade_cluster_setup=true``` is needed to be set in order to migr
Kubespray also supports cordon, drain and uncordoning of nodes when performing
a cluster upgrade. There is a separate playbook used for this purpose. It is
important to note that upgrade-cluster.yml can only be used for upgrading an
existing cluster. That means there must be at least 1 kube-master already
existing cluster. That means there must be at least 1 kube_control_plane already
deployed.
```ShellSession


@ -36,7 +36,7 @@ Some variables of note include:
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
and access_ip are undefined
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
address instead of localhost for kube-masters and kube-master[0] for
address instead of localhost for kube_control_planes and kube_control_plane[0] for
kube-nodes. See more details in the
[HA guide](/docs/ha-mode.md).
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to


@ -1,5 +1,5 @@
---
- hosts: kube-node:kube-master
- hosts: kube-node:kube_control_plane
tasks:
- name: Remove old cloud provider config
file:
@ -7,7 +7,7 @@
state: absent
with_items:
- /etc/kubernetes/cloud_config
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
tasks:
- name: Include kubespray-default variables
include_vars: ../roles/kubespray-defaults/defaults/main.yaml


@ -34,7 +34,7 @@
- { role: kubernetes/preinstall, tags: preinstall }
- name: Handle upgrades to master components first to maintain backwards compat.
hosts: kube-master
hosts: kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1
roles:
@ -47,7 +47,7 @@
- { role: upgrade/post-upgrade, tags: post-upgrade }
- name: Finally handle worker upgrades, based on given batch size
hosts: kube-node:!kube-master
hosts: kube-node:!kube_control_plane
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:


@ -1,6 +1,6 @@
node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases
[kube-master]
[kube_control_plane]
node1
[etcd]
@ -11,5 +11,5 @@ node1
[k8s-cluster:children]
kube-node
kube-master
kube_control_plane
calico-rr


@ -13,7 +13,7 @@
# [bastion]
# bastion ansible_host=x.x.x.x ansible_user=some_user
[kube-master]
[kube_control_plane]
# node1
# node2
# node3
@ -33,6 +33,6 @@
[calico-rr]
[k8s-cluster:children]
kube-master
kube_control_plane
kube-node
calico-rr


@ -2,6 +2,15 @@
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Add kube-master nodes to kube_control_plane
# This is for old inventory which contains kube-master instead of kube_control_plane
hosts: kube-master
gather_facts: false
tasks:
- name: add nodes to kube_control_plane group
group_by:
key: 'kube_control_plane'
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"
@ -15,7 +24,7 @@
- { role: kubespray-defaults}
- { role: recover_control_plane/etcd }
- hosts: "{{ groups['kube-master'] | first }}"
- hosts: "{{ groups['kube_control_plane'] | first }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}
@ -23,7 +32,7 @@
- include: cluster.yml
- hosts: "{{ groups['kube-master'] }}"
- hosts: "{{ groups['kube_control_plane'] }}"
environment: "{{ proxy_disable_env }}"
roles:
- { role: kubespray-defaults}


@ -2,6 +2,15 @@
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Add kube-master nodes to kube_control_plane
# This is for old inventory which contains kube-master instead of kube_control_plane
hosts: kube-master
gather_facts: false
tasks:
- name: add nodes to kube_control_plane group
group_by:
key: 'kube_control_plane'
- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
@ -17,7 +26,7 @@
msg: "Delete nodes confirmation failed"
when: delete_nodes_confirmation != "yes"
- hosts: kube-master[0]
- hosts: kube_control_plane[0]
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:
@ -35,7 +44,7 @@
- { role: reset, tags: reset, when: reset_nodes|default(True)|bool }
# Currently cannot remove first master or etcd
- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
gather_facts: no
environment: "{{ proxy_disable_env }}"
roles:


@ -2,6 +2,15 @@
- name: Check ansible version
import_playbook: ansible_version.yml
- name: Add kube-master nodes to kube_control_plane
# This is for old inventory which contains kube-master instead of kube_control_plane
hosts: kube-master
gather_facts: false
tasks:
- name: add nodes to kube_control_plane group
group_by:
key: 'kube_control_plane'
- hosts: bastion[0]
gather_facts: False
environment: "{{ proxy_disable_env }}"


@ -12,7 +12,7 @@ platforms:
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:


@ -12,25 +12,25 @@ platforms:
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: centos7
box: centos/7
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: centos8
box: centos/8
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
- name: fedora
box: fedora/33-cloud-base
cpus: 2
memory: 1024
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:


@ -15,14 +15,14 @@ platforms:
memory: 1024
nested: true
groups:
- kube-master
- kube_control_plane
- name: ubuntu20
box: generic/ubuntu2004
cpus: 1
memory: 1024
nested: true
groups:
- kube-master
- kube_control_plane
provisioner:
name: ansible
env:


@ -30,7 +30,7 @@ download_container: true
# if this is set to true, uses the localhost for download_run_once mode
# (requires docker and sudo to access docker). You may want this option for
# local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes.
# Otherwise, uses the first node in the kube-master group to store images
# Otherwise, uses the first node in the kube_control_plane group to store images
# in the download_run_once mode.
download_localhost: false
@ -42,8 +42,8 @@ download_always_pull: false
# SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
download_validate_certs: true
# Use the first kube-master if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
# Use the first kube_control_plane if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}"
# Arch of Docker images and needed packages
image_arch: "{{host_architecture | default('amd64')}}"
@ -733,7 +733,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
crictl:
file: true
@ -883,7 +883,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
@ -973,7 +973,7 @@ downloads:
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
nodelocaldns:
enabled: "{{ enable_nodelocaldns }}"
@ -991,7 +991,7 @@ downloads:
tag: "{{ dnsautoscaler_image_tag }}"
sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
testbox:
enabled: false
@ -1011,7 +1011,7 @@ downloads:
owner: "root"
mode: "0755"
groups:
- kube-master
- kube_control_plane
registry:
enabled: "{{ registry_enabled }}"
@ -1038,7 +1038,7 @@ downloads:
tag: "{{ metrics_server_image_tag }}"
sha256: "{{ metrics_server_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
addon_resizer:
# Currently addon_resizer is only used by metrics server
@ -1048,7 +1048,7 @@ downloads:
tag: "{{ addon_resizer_image_tag }}"
sha256: "{{ addon_resizer_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
@ -1219,7 +1219,7 @@ downloads:
tag: "{{ dashboard_image_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
dashboard_metrics_scrapper:
enabled: "{{ dashboard_enabled }}"
@ -1228,7 +1228,7 @@ downloads:
tag: "{{ dashboard_metrics_scraper_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
groups:
- kube-master
- kube_control_plane
download_defaults:
container: false


@ -18,7 +18,7 @@
include_tasks: prep_kubeadm_images.yml
when:
- not skip_downloads|default(false)
- inventory_hostname in groups['kube-master']
- inventory_hostname in groups['kube_control_plane']
tags:
- download
- upload


@ -6,7 +6,7 @@
ignore_errors: true
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Delete kubeadm CoreDNS
kube:
@ -17,7 +17,7 @@
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- createdby_annotation.stdout != 'kubespray'
- name: Kubernetes Apps | Delete kubeadm Kube-DNS service
@ -29,4 +29,4 @@
state: absent
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -20,7 +20,7 @@
clusterIP: "{{ skydns_server }}"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
@ -38,6 +38,6 @@
coredns_ordinal_suffix: "-secondary"
when:
- dns_mode == 'coredns_dual'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns


@ -6,7 +6,7 @@
with_items:
- { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
register: manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start dashboard
kube:
@ -17,4 +17,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]


@ -9,12 +9,12 @@
until: result.status == 200
retries: 20
delay: 1
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Cleanup DNS
import_tasks: cleanup_dns.yml
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
- coredns
@ -24,7 +24,7 @@
import_tasks: "coredns.yml"
when:
- dns_mode in ['coredns', 'coredns_dual']
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- coredns
@ -32,7 +32,7 @@
import_tasks: "nodelocaldns.yml"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
@ -50,7 +50,7 @@
- "{{ nodelocaldns_manifests.results | default({}) }}"
when:
- dns_mode != 'none'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
register: resource_result
until: resource_result is succeeded


@ -28,7 +28,7 @@
with_items: "{{ netchecker_templates }}"
register: manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Start Netchecker Resources
kube:
@ -39,4 +39,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped


@ -10,7 +10,7 @@
secondaryclusterIP: "{{ skydns_server_secondary }}"
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns
@ -39,7 +39,7 @@
{%- endif -%}
when:
- enable_nodelocaldns
- inventory_hostname == groups['kube-master'] | first
- inventory_hostname == groups['kube_control_plane'] | first
tags:
- nodelocaldns
- coredns


@ -7,7 +7,7 @@
template:
src: controller-manager-config.yml.j2
dest: "{{ kube_config_dir }}/controller-manager-config.yml"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Slurp Configuration"
@ -18,14 +18,14 @@
- name: "OCI Cloud Controller | Encode Configuration"
set_fact:
controller_manager_config_base64: "{{ controller_manager_config.content }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Generate Manifests"
template:
src: oci-cloud-provider.yml.j2
dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci
- name: "OCI Cloud Controller | Apply Manifests"
@ -33,5 +33,5 @@
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: oci


@ -9,14 +9,14 @@
until: result.status == 200
retries: 10
delay: 6
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Check AppArmor status
command: which apparmor_parser
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
@ -24,7 +24,7 @@
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Render templates for PodSecurityPolicy
template:
@ -37,7 +37,7 @@
- {file: psp-crb.yml, type: rolebinding, name: psp-crb}
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
kube:
@ -52,7 +52,7 @@
delay: 6
with_items: "{{ psp_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"
@ -64,7 +64,7 @@
register: node_crb_manifest
when:
- rbac_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply workaround to allow all nodes with cert O=system:nodes to register
kube:
@ -80,7 +80,7 @@
when:
- rbac_enabled
- node_crb_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
template:
@ -90,7 +90,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Apply webhook ClusterRole
@ -104,7 +104,7 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_cr_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole
@ -115,7 +115,7 @@
when:
- rbac_enabled
- kubelet_authorization_mode_webhook
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- name: Grant system:nodes the webhook ClusterRole
@ -129,7 +129,7 @@
- rbac_enabled
- kubelet_authorization_mode_webhook
- node_webhook_crb_manifest.changed
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags: node-webhook
- include_tasks: oci.yml
@ -140,7 +140,7 @@
- name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
when: inventory_hostname == groups['kube-master']|last
when: inventory_hostname == groups['kube_control_plane']|last
- name: PriorityClass | Create k8s-cluster-critical
kube:
@ -149,4 +149,4 @@
resource: "PriorityClass"
filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
state: latest
when: inventory_hostname == groups['kube-master']|last
when: inventory_hostname == groups['kube_control_plane']|last


@ -6,7 +6,7 @@
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Apply OCI RBAC
kube:
@ -15,4 +15,4 @@
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -38,7 +38,7 @@
- { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
register: container_engine_accelerator_manifests
when:
- inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container
- name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
kube:
@ -51,4 +51,4 @@
with_items:
- "{{ container_engine_accelerator_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
- inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported


@ -6,7 +6,7 @@
dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
mode: "0664"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: crun | Apply manifests
kube:
@ -16,4 +16,4 @@
filename: "{{ kube_config_dir }}/runtimeclass-crun.yml"
state: "latest"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -20,7 +20,7 @@
with_items: "{{ kata_containers_templates }}"
register: kata_containers_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kata Containers | Apply manifests
kube:
@ -31,4 +31,4 @@
state: "latest"
with_items: "{{ kata_containers_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -9,7 +9,7 @@
- {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
- {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
register: aws_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: aws-ebs-csi-driver
- name: AWS CSI Driver | Apply Manifests
@ -20,7 +20,7 @@
with_items:
- "{{ aws_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -8,14 +8,14 @@
dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/azure_csi_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Generate Manifests
@ -30,7 +30,7 @@
- {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
- {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml}
register: azure_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: azure-csi-driver
- name: Azure CSI Driver | Apply Manifests
@ -41,7 +41,7 @@
with_items:
- "{{ azure_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -20,14 +20,14 @@
dest: "{{ kube_config_dir }}/cinder_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/cinder_cloud_config"
register: cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Generate Manifests
@ -43,7 +43,7 @@
- {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
- {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
register: cinder_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: cinder-csi-driver
- name: Cinder CSI Driver | Apply Manifests
@ -54,7 +54,7 @@
with_items:
- "{{ cinder_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -8,7 +8,7 @@
- {name: volumesnapshotcontents, file: volumesnapshotcontents.yml}
- {name: volumesnapshots, file: volumesnapshots.yml}
register: csi_crd_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: csi-driver
- name: CSI CRD | Apply Manifests
@ -20,7 +20,7 @@
with_items:
- "{{ csi_crd_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -11,14 +11,14 @@
dest: "{{ kube_config_dir }}/cloud-sa.json"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Get base64 cloud-sa.json
slurp:
src: "{{ kube_config_dir }}/cloud-sa.json"
register: gcp_cred_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Generate Manifests
@ -31,7 +31,7 @@
- {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
- {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
register: gcp_pd_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: gcp-pd-csi-driver
- name: GCP PD CSI Driver | Apply Manifests
@ -42,7 +42,7 @@
with_items:
- "{{ gcp_pd_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -9,7 +9,7 @@
mode: 0640
with_items:
- vsphere-csi-cloud-config
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: vsphere-csi-driver
- name: vSphere CSI Driver | Generate Manifests
@ -21,13 +21,13 @@
- vsphere-csi-controller-ss.yml
- vsphere-csi-node.yml
register: vsphere_csi_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: vsphere-csi-driver
- name: vSphere CSI Driver | Generate a CSI secret manifest
command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
tags: vsphere-csi-driver
@ -35,7 +35,7 @@
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
tags: vsphere-csi-driver
@ -47,7 +47,7 @@
with_items:
- "{{ vsphere_csi_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item }}"
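
The two secret tasks above rely on a useful pattern: "kubectl create secret ... --dry-run --save-config -o yaml" renders the Secret manifest in memory, and the command module's stdin feeds it straight into "kubectl apply -f -", so the secret contents are never written to disk. A minimal standalone sketch of the same pattern follows; the secret name example-config, the source file example.conf, and the reuse of bin_dir/kube_config_dir are placeholders and assumptions, not part of this change. Recent kubectl releases also prefer --dry-run=client over the bare --dry-run flag used in these tasks.

- name: Example | Render a Secret manifest in memory, without writing it to disk
  command: >-
    {{ bin_dir }}/kubectl create secret generic example-config
    --from-file=example.conf={{ kube_config_dir }}/example.conf
    -n kube-system --dry-run --save-config -o yaml
  register: example_secret_manifest
  no_log: true

- name: Example | Apply the rendered manifest from stdin
  command:
    cmd: "{{ bin_dir }}/kubectl apply -f -"
    stdin: "{{ example_secret_manifest.stdout }}"
  no_log: true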


@ -6,7 +6,7 @@ dependencies:
- cloud_provider == "external"
- external_cloud_provider is defined
- external_cloud_provider == "openstack"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- external-cloud-controller
- external-openstack
@ -16,7 +16,7 @@ dependencies:
- cloud_provider == "external"
- external_cloud_provider is defined
- external_cloud_provider == "vsphere"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- external-cloud-controller
- external-vsphere


@ -20,14 +20,14 @@
dest: "{{ kube_config_dir }}/external_openstack_cloud_config"
group: "{{ kube_cert_group }}"
mode: 0640
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Get base64 cloud-config
slurp:
src: "{{ kube_config_dir }}/external_openstack_cloud_config"
register: external_openstack_cloud_config_secret
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Generate Manifests
@ -42,7 +42,7 @@
- {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml}
- {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml}
register: external_openstack_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-openstack
- name: External OpenStack Cloud Controller | Apply Manifests
@ -53,7 +53,7 @@
with_items:
- "{{ external_openstack_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item.file }}"


@ -9,7 +9,7 @@
mode: 0640
with_items:
- external-vsphere-cpi-cloud-config
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Controller | Generate Manifests
@ -22,20 +22,20 @@
- external-vsphere-cloud-controller-manager-role-bindings.yml
- external-vsphere-cloud-controller-manager-ds.yml
register: external_vsphere_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest
command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml"
register: external_vsphere_configmap_manifest
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
stdin: "{{ external_vsphere_configmap_manifest.stdout }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags: external-vsphere
- name: External vSphere Cloud Controller | Apply Manifests
@ -46,7 +46,7 @@
with_items:
- "{{ external_vsphere_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- not item is skipped
loop_control:
label: "{{ item.item }}"


@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -23,7 +23,7 @@
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -35,7 +35,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Templates list
set_fact:
@ -65,7 +65,7 @@
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items: "{{ cephfs_provisioner_templates }}"
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: CephFS Provisioner | Apply manifests
kube:
@ -76,4 +76,4 @@
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]


@ -7,7 +7,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Create claim root dir
file:
@ -42,7 +42,7 @@
dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
with_items: "{{ local_path_provisioner_templates }}"
register: local_path_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Path Provisioner | Apply manifests
kube:
@ -53,4 +53,4 @@
filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_path_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]


@ -42,7 +42,7 @@
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
with_items: "{{ local_volume_provisioner_templates }}"
register: local_volume_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Local Volume Provisioner | Apply manifests
kube:
@ -53,6 +53,6 @@
filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ local_volume_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
loop_control:
label: "{{ item.item.file }}"


@ -3,7 +3,7 @@ dependencies:
- role: kubernetes-apps/external_provisioner/local_volume_provisioner
when:
- local_volume_provisioner_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- apps
- local-volume-provisioner


@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -23,7 +23,7 @@
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -35,7 +35,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Templates list
set_fact:
@ -65,7 +65,7 @@
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
with_items: "{{ rbd_provisioner_templates }}"
register: rbd_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: RBD Provisioner | Apply manifests
kube:
@ -76,4 +76,4 @@
filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ rbd_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]


@ -20,7 +20,7 @@
- { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy }
register: alb_ingress_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: ALB Ingress Controller | Apply manifests
kube:
@ -32,4 +32,4 @@
state: "latest"
with_items: "{{ alb_ingress_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -8,7 +8,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Templates list
set_fact:
@ -29,7 +29,7 @@
loop: "{{ ingress_ambassador_templates }}"
register: ingress_ambassador_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Apply manifests
kube:
@ -41,7 +41,7 @@
state: "latest"
loop: "{{ ingress_ambassador_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
# load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded
@ -57,7 +57,7 @@
loop: "{{ ingress_ambassador_cr_templates }}"
register: ingress_ambassador_cr_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Ambassador | Apply AmbassadorInstallation
kube:
@ -69,4 +69,4 @@
state: "latest"
loop: "{{ ingress_ambassador_cr_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -5,7 +5,7 @@
path: "{{ kube_config_dir }}/addons/cert_manager"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -14,7 +14,7 @@
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -26,7 +26,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Templates list
set_fact:
@ -54,7 +54,7 @@
with_items: "{{ cert_manager_templates }}"
register: cert_manager_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Apply manifests
kube:
@ -65,12 +65,12 @@
state: "latest"
with_items: "{{ cert_manager_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Wait for Webhook pods to become ready
command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s"
register: cert_manager_webhook_pods_ready
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Cert Manager | Create ClusterIssuer manifest
template:
@ -78,7 +78,7 @@
dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
register: cert_manager_clusterissuer_manifest
when:
- inventory_hostname == groups['kube-master'][0] and cert_manager_webhook_pods_ready is succeeded
- inventory_hostname == groups['kube_control_plane'][0] and cert_manager_webhook_pods_ready is succeeded
- name: Cert Manager | Apply ClusterIssuer manifest
kube:
@ -86,4 +86,4 @@
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
state: "latest"
when: inventory_hostname == groups['kube-master'][0] and cert_manager_clusterissuer_manifest is succeeded
when: inventory_hostname == groups['kube_control_plane'][0] and cert_manager_clusterissuer_manifest is succeeded
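
The ClusterIssuer tasks above are gated on "kubectl wait" reporting the webhook pods Ready, which avoids the race where the not-yet-serving cert-manager webhook rejects the ClusterIssuer. A reduced sketch of the same gate, using placeholder names (example-ns, app=example-webhook, clusterissuer-example.yml) that are not part of this change:

- name: Example | Block until the admission webhook pods are Ready
  command: >-
    {{ bin_dir }}/kubectl wait po --namespace=example-ns
    --selector app=example-webhook --for=condition=Ready --timeout=600s
  register: example_webhook_ready
  changed_when: false
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Example | Apply the dependent manifest only after the wait succeeded
  kube:
    name: clusterissuer-example
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/clusterissuer-example.yml"
    state: "latest"
  when:
    - inventory_hostname == groups['kube_control_plane'][0]
    - example_webhook_ready is succeeded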


@ -8,7 +8,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: NGINX Ingress Controller | Templates list
set_fact:
@ -38,7 +38,7 @@
with_items: "{{ ingress_nginx_templates }}"
register: ingress_nginx_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: NGINX Ingress Controller | Apply manifests
kube:
@ -50,4 +50,4 @@
state: "latest"
with_items: "{{ ingress_nginx_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]


@ -2,7 +2,7 @@
dependencies:
- role: kubernetes-apps/ansible
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- role: kubernetes-apps/helm
when:
@ -13,21 +13,21 @@ dependencies:
- role: kubernetes-apps/registry
when:
- registry_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- registry
- role: kubernetes-apps/metrics_server
when:
- metrics_server_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- metrics_server
- role: kubernetes-apps/csi_driver/csi_crd
when:
- cinder_csi_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- csi-driver
@ -69,19 +69,19 @@ dependencies:
- role: kubernetes-apps/persistent_volumes
when:
- persistent_volumes_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- persistent_volumes
- role: kubernetes-apps/snapshots
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
tags:
- snapshots
- csi-driver
- role: kubernetes-apps/container_runtimes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- container-runtimes
@ -94,13 +94,13 @@ dependencies:
when:
- cloud_provider is defined
- cloud_provider == "oci"
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- oci
- role: kubernetes-apps/metallb
when:
- metallb_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- metallb


@ -22,7 +22,7 @@
register: apparmor_status
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
failed_when: false
- name: Kubernetes Apps | Set apparmor_enabled
@ -30,7 +30,7 @@
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
when:
- podsecuritypolicy_enabled
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: "Kubernetes Apps | Lay Down MetalLB"
become: true
@ -38,7 +38,7 @@
with_items: ["metallb.yml", "metallb-config.yml"]
register: "rendering"
when:
- "inventory_hostname == groups['kube-master'][0]"
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: "Kubernetes Apps | Install and configure MetalLB"
kube:
@ -49,7 +49,7 @@
become: true
with_items: "{{ rendering.results }}"
when:
- "inventory_hostname == groups['kube-master'][0]"
- "inventory_hostname == groups['kube_control_plane'][0]"
- name: Kubernetes Apps | Check existing secret of MetalLB
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
@ -57,18 +57,18 @@
become: true
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Kubernetes Apps | Create random bytes for MetalLB
command: "openssl rand -base64 32"
register: metallb_rand
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_secret.rc != 0
- name: Kubernetes Apps | Install secret of MetalLB if not existing
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
become: true
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- metallb_secret.rc != 0
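
The memberlist handling above is an idempotent check-then-create: the result of "get secret" is registered, and key generation plus secret creation only run when the return code shows the secret is missing. The same flow in isolation, sketched with failed_when: false instead of ignore_errors and without the explicit --kubeconfig (both deliberate simplifications, not part of this change):

- name: Example | Check whether the memberlist secret already exists
  command: "{{ bin_dir }}/kubectl -n metallb-system get secret memberlist"
  register: example_secret_check
  failed_when: false  # a missing secret is the expected result on first run
  changed_when: false

- name: Example | Generate key material only when the secret is missing
  command: "openssl rand -base64 32"
  register: example_secret_key
  when: example_secret_check.rc != 0

- name: Example | Create the secret exactly once
  command: >-
    {{ bin_dir }}/kubectl -n metallb-system create secret generic memberlist
    --from-literal=secretkey={{ example_secret_key.stdout }}
  when: example_secret_check.rc != 0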


@ -2,14 +2,14 @@
# If all masters have the node role, there are no tainted masters and the toleration should not be specified.
- name: Check all masters are node or not
set_fact:
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube-master']) == groups['kube-master'] }}"
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
- name: Metrics Server | Delete addon dir
file:
path: "{{ kube_config_dir }}/addons/metrics_server"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade
@ -21,7 +21,7 @@
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Metrics Server | Templates list
set_fact:
@ -43,7 +43,7 @@
with_items: "{{ metrics_server_templates }}"
register: metrics_server_manifests
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
- name: Metrics Server | Apply manifests
kube:
@ -54,4 +54,4 @@
state: "latest"
with_items: "{{ metrics_server_manifests.results }}"
when:
- inventory_hostname == groups['kube-master'][0]
- inventory_hostname == groups['kube_control_plane'][0]
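
The masters_are_not_tainted expression at the top of this file is the notable piece: groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] is true only when every control plane host also appears in the kube-node group, i.e. when no control plane node keeps its NoSchedule taint and the metrics-server toleration can be dropped. A tiny self-contained check with hypothetical host names, just to illustrate the filter:

- name: Example | Evaluate the intersect check with literal host lists
  vars:
    example_nodes: ['node1', 'node2', 'cp1']  # stand-in for groups['kube-node']
    example_control_plane: ['cp1']            # stand-in for groups['kube_control_plane']
  debug:
    # prints true: the only control plane host also runs as a node
    msg: "{{ example_nodes | intersect(example_control_plane) == example_control_plane }}"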


@ -8,4 +8,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ canal_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
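
The "not item is skipped" half of this condition matters because the registered canal_manifests.results can contain skipped entries whenever the rendering task, or individual items of it, did not run on this host; the guard keeps kubectl from being pointed at a file that was never rendered. A generic render-register-apply sketch of the pattern, where example_manifests is a hypothetical list such as [{name: example-cm, file: example-cm.yml}]:

- name: Example | Render manifests on the first control plane node only
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/{{ item.file }}"
  with_items: "{{ example_manifests }}"
  register: example_manifest_results
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Example | Apply only the entries that were actually rendered
  kube:
    name: "{{ item.item.name }}"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ example_manifest_results.results }}"
  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped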


@ -8,7 +8,7 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ cilium_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Cilium | Wait for pods to run
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
@ -17,4 +17,4 @@
retries: 30
delay: 10
ignore_errors: yes
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]


@ -8,7 +8,7 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ flannel_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
- name: Flannel | Wait for flannel subnet.env file presence
wait_for:


@ -6,4 +6,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ kube_ovn_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

View File

@ -8,7 +8,7 @@
resource: "ds"
namespace: "kube-system"
state: "latest"
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
- name: kube-router | Wait for kube-router pods to be ready
@ -18,6 +18,6 @@
retries: 30
delay: 10
ignore_errors: yes
delegate_to: "{{ groups['kube-master'] | first }}"
delegate_to: "{{ groups['kube_control_plane'] | first }}"
run_once: true
changed_when: false
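
Here delegate_to: "{{ groups['kube_control_plane'] | first }}" combined with run_once: true executes the task exactly once per play, always on the first control plane node, which is the right shape for cluster-scoped kubectl calls; the alternative used elsewhere, when: inventory_hostname == groups['kube_control_plane'][0], still iterates over (and skips on) every other host. A small sketch, with the k8s-app=kube-router selector assumed from the surrounding task names rather than taken from this diff:

- name: Example | Run one cluster-wide check from the first control plane node
  command: >-
    {{ bin_dir }}/kubectl -n kube-system get pods
    -l k8s-app=kube-router -o name
  register: example_router_pods
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  run_once: true
  changed_when: false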


@ -8,4 +8,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped


@ -6,4 +6,4 @@
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ ovn4nfv_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped


@ -8,7 +8,7 @@
resource: "ds"
namespace: "kube-system"
state: "latest"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]
- name: Weave | Wait for Weave to become available
uri:
@ -18,4 +18,4 @@
retries: 180
delay: 5
until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
when: inventory_hostname == groups['kube-master'][0]
when: inventory_hostname == groups['kube_control_plane'][0]

Some files were not shown because too many files have changed in this diff.