Replace kube-master with kube_control_plane (#7256)
This replaces kube-master with kube_control_plane because of [1]: the Kubernetes project is moving away from wording that is considered offensive. A new working group, WG Naming, was created to track this work, and the word "master" was declared offensive. A proposal was formalized to replace the word "master" with "control plane", which means it should be removed from source code, documentation, and user-facing configuration in Kubernetes and its sub-projects.

NOTE: The group is named kube_control_plane rather than kube-control-plane because kube-control-plane would not be a valid group name in Ansible.

[1]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint/README.md#motivation
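For operators, the visible effect of this rename is in the Ansible inventory: the control plane group changes name while everything else stays the same. A minimal before/after sketch of an INI inventory (the host names here are placeholders, not taken from this commit):

```ini
# before
[kube-master]
node1
node2

[k8s-cluster:children]
kube-master
kube-node

# after
[kube_control_plane]
node1
node2

[k8s-cluster:children]
kube_control_plane
kube-node
```

Inventories that still use the old group name keep working for now, because cluster.yml (see the diff below) adds any kube-master hosts to kube_control_plane at runtime via group_by.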
parent d53fd29e34, commit 486b223e01
159 changed files with 564 additions and 485 deletions
@@ -30,7 +30,7 @@ variables:
  MITOGEN_ENABLE: "false"
  ANSIBLE_LOG_LEVEL: "-vv"
  RECOVER_CONTROL_PLANE_TEST: "false"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

  before_script:
  - ./tests/scripts/rebase.sh

@@ -223,7 +223,7 @@ packet_ubuntu18-calico-ha-recover:
  when: on_success
  variables:
  RECOVER_CONTROL_PLANE_TEST: "true"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube-master[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[2:],kube_control_plane[1:]"

  packet_ubuntu18-calico-ha-recover-noquorum:
  stage: deploy-part3

@@ -231,4 +231,4 @@ packet_ubuntu18-calico-ha-recover-noquorum:
  when: on_success
  variables:
  RECOVER_CONTROL_PLANE_TEST: "true"
- RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube-master[1:]"
+ RECOVER_CONTROL_PLANE_TEST_GROUPS: "etcd[1:],kube_control_plane[1:]"
Vagrantfile (vendored, 4 changes)

@@ -253,9 +253,9 @@ Vagrant.configure("2") do |config|
  #ansible.tags = ['download']
  ansible.groups = {
  "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"],
- "kube-master" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
+ "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"],
  "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"],
- "k8s-cluster:children" => ["kube-master", "kube-node"],
+ "k8s-cluster:children" => ["kube_control_plane", "kube-node"],
  }
  end
  end
cluster.yml (17 changes)

@@ -2,6 +2,15 @@
  - name: Check ansible version
  import_playbook: ansible_version.yml

+ - name: Add kube-master nodes to kube_control_plane
+ # This is for old inventory which contains kube-master instead of kube_control_plane
+ hosts: kube-master
+ gather_facts: false
+ tasks:
+ - name: add nodes to kube_control_plane group
+ group_by:
+ key: 'kube_control_plane'
+
  - hosts: bastion[0]
  gather_facts: False
  environment: "{{ proxy_disable_env }}"

@@ -66,7 +75,7 @@
  - { role: kubespray-defaults }
  - { role: kubernetes/node, tags: node }

- - hosts: kube-master
+ - hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"

@@ -94,7 +103,7 @@
  - { role: kubespray-defaults }
  - { role: network_plugin/calico/rr, tags: ['network', 'calico_rr'] }

- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"

@@ -102,7 +111,7 @@
  - { role: kubespray-defaults }
  - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] }

- - hosts: kube-master
+ - hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"

@@ -114,7 +123,7 @@
  - { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
  - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }

- - hosts: kube-master
+ - hosts: kube_control_plane
  gather_facts: False
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  environment: "{{ proxy_disable_env }}"
@@ -35,7 +35,7 @@ class SearchEC2Tags(object):
  hosts['_meta'] = { 'hostvars': {} }

  ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
- for group in ["kube-master", "kube-node", "etcd"]:
+ for group in ["kube_control_plane", "kube-node", "etcd"]:
  hosts[group] = []
  tag_key = "kubespray-role"
  tag_value = ["*"+group+"*"]

@@ -70,7 +70,7 @@ class SearchEC2Tags(object):
  hosts[group].append(dns_name)
  hosts['_meta']['hostvars'][dns_name] = ansible_host

- hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
+ hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']}
  print(json.dumps(hosts, sort_keys=True, indent=2))

  SearchEC2Tags()
@@ -7,9 +7,9 @@
  {% endif %}
  {% endfor %}

- [kube-master]
+ [kube_control_plane]
  {% for vm in vm_list %}
- {% if 'kube-master' in vm.tags.roles %}
+ {% if 'kube_control_plane' in vm.tags.roles %}
  {{ vm.name }}
  {% endif %}
  {% endfor %}

@@ -30,4 +30,4 @@

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
@@ -7,9 +7,9 @@
  {% endif %}
  {% endfor %}

- [kube-master]
+ [kube_control_plane]
  {% for vm in vm_roles_list %}
- {% if 'kube-master' in vm.tags.roles %}
+ {% if 'kube_control_plane' in vm.tags.roles %}
  {{ vm.name }}
  {% endif %}
  {% endfor %}

@@ -30,5 +30,5 @@

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
@@ -144,7 +144,7 @@
  "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
  ],
  "tags": {
- "roles": "kube-master,etcd"
+ "roles": "kube_control_plane,etcd"
  },
  "apiVersion": "{{apiVersion}}",
  "properties": {
@@ -44,7 +44,7 @@ import re
  import subprocess
  import sys

- ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster',
+ ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster',
  'calico-rr']
  PROTECTED_NAMES = ROLES
  AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames',

@@ -299,21 +299,23 @@ class KubesprayInventory(object):

  def set_kube_control_plane(self, hosts):
  for host in hosts:
- self.add_host_to_group('kube-master', host)
+ self.add_host_to_group('kube_control_plane', host)

  def set_all(self, hosts):
  for host, opts in hosts.items():
  self.add_host_to_group('all', host, opts)

  def set_k8s_cluster(self):
- k8s_cluster = {'children': {'kube-master': None, 'kube-node': None}}
+ k8s_cluster = {'children': {'kube_control_plane': None,
+ 'kube-node': None}}
  self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster

  def set_calico_rr(self, hosts):
  for host in hosts:
- if host in self.yaml_config['all']['children']['kube-master']:
+ if host in self.yaml_config['all']['children']['kube_control_plane']:  # noqa
  self.debug("Not adding {0} to calico-rr group because it "
- "conflicts with kube-master group".format(host))
+ "conflicts with kube_control_plane "
+ "group".format(host))
  continue
  if host in self.yaml_config['all']['children']['kube-node']:
  self.debug("Not adding {0} to calico-rr group because it "

@@ -330,10 +332,10 @@ class KubesprayInventory(object):
  "group.".format(host))
  continue
  if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD:  # noqa
- if host in self.yaml_config['all']['children']['kube-master']['hosts']:  # noqa
+ if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']:  # noqa
  self.debug("Not adding {0} to kube-node group because of "
- "scale deployment and host is in kube-master "
- "group.".format(host))
+ "scale deployment and host is in "
+ "kube_control_plane group.".format(host))
  continue
  self.add_host_to_group('kube-node', host)
@@ -223,7 +223,7 @@ class TestInventory(unittest.TestCase):
  None)

  def test_set_kube_control_plane(self):
- group = 'kube-master'
+ group = 'kube_control_plane'
  host = 'node1'

  self.inv.set_kube_control_plane([host])

@@ -242,7 +242,7 @@ class TestInventory(unittest.TestCase):

  def test_set_k8s_cluster(self):
  group = 'k8s-cluster'
- expected_hosts = ['kube-node', 'kube-master']
+ expected_hosts = ['kube-node', 'kube_control_plane']

  self.inv.set_k8s_cluster()
  for host in expected_hosts:
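For context, the inventory builder changed above is normally run by pointing CONFIG_FILE at the target hosts file and passing node IPs; after this change the generated file uses kube_control_plane. A rough usage sketch (the cluster name, path, and IPs are illustrative, not part of this commit):

```sh
# Hypothetical run of contrib/inventory_builder/inventory.py after this change
CONFIG_FILE=inventory/mycluster/hosts.yaml \
  python3 contrib/inventory_builder/inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
# hosts.yaml should now list kube_control_plane (not kube-master) as a child of k8s-cluster
```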
@@ -19,6 +19,6 @@
  roles:
  - { role: glusterfs/client }

- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  roles:
  - { role: kubernetes-pv }
@@ -14,7 +14,7 @@
  # gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
  # gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

- # [kube-master]
+ # [kube_control_plane]
  # node1
  # node2

@@ -32,7 +32,7 @@

  # [k8s-cluster:children]
  # kube-node
- # kube-master
+ # kube_control_plane

  # [gfs-cluster]
  # gfs_node1
@@ -8,7 +8,7 @@
  - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
  - { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
  register: gluster_pv
- when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
+ when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

  - name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:

@@ -19,4 +19,4 @@
  filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
  state: "{{ item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
- when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
+ when: inventory_hostname == groups['kube_control_plane'][0] and groups['gfs-cluster'] is defined
@@ -1,5 +1,5 @@
  ---
- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  roles:
  - { role: tear-down }
@@ -3,7 +3,7 @@
  roles:
  - { role: prepare }

- - hosts: kube-master[0]
+ - hosts: kube_control_plane[0]
  tags:
  - "provision"
  roles:
@@ -7,7 +7,7 @@ all:
  vars:
  kubelet_fail_swap_on: false
  children:
- kube-master:
+ kube_control_plane:
  hosts:
  node1:
  etcd:
@@ -122,7 +122,7 @@ You can use the following set of commands to get the kubeconfig file from your n

  ```commandline
  # Get the controller's IP address.
- CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube-master\]" -A 1 | tail -n 1)
+ CONTROLLER_HOST_NAME=$(cat ./inventory/hosts | grep "\[kube_control_plane\]" -A 1 | tail -n 1)
  CONTROLLER_IP=$(cat ./inventory/hosts | grep $CONTROLLER_HOST_NAME | grep ansible_host | cut -d'=' -f2)

  # Get the hostname of the load balancer.
@@ -84,7 +84,7 @@ resource "aws_instance" "k8s-master" {

  vpc_security_group_ids = module.aws-vpc.aws_security_group

- iam_instance_profile = module.aws-iam.kube-master-profile
+ iam_instance_profile = module.aws-iam.kube_control_plane-profile
  key_name = var.AWS_SSH_KEY_NAME

  tags = merge(var.default_tags, map(
@@ -1,6 +1,6 @@
  #Add AWS Roles for Kubernetes

- resource "aws_iam_role" "kube-master" {
+ resource "aws_iam_role" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"

  assume_role_policy = <<EOF

@@ -40,9 +40,9 @@ EOF

  #Add AWS Policies for Kubernetes

- resource "aws_iam_role_policy" "kube-master" {
+ resource "aws_iam_role_policy" "kube_control_plane" {
  name = "kubernetes-${var.aws_cluster_name}-master"
- role = aws_iam_role.kube-master.id
+ role = aws_iam_role.kube_control_plane.id

  policy = <<EOF
  {

@@ -130,9 +130,9 @@ EOF

  #Create AWS Instance Profiles

- resource "aws_iam_instance_profile" "kube-master" {
+ resource "aws_iam_instance_profile" "kube_control_plane" {
  name = "kube_${var.aws_cluster_name}_master_profile"
- role = aws_iam_role.kube-master.name
+ role = aws_iam_role.kube_control_plane.name
  }

  resource "aws_iam_instance_profile" "kube-worker" {
@@ -1,5 +1,5 @@
- output "kube-master-profile" {
+ output "kube_control_plane-profile" {
- value = aws_iam_instance_profile.kube-master.name
+ value = aws_iam_instance_profile.kube_control_plane.name
  }

  output "kube-worker-profile" {
@@ -7,7 +7,7 @@ ${public_ip_address_bastion}
  [bastion]
  ${public_ip_address_bastion}

- [kube-master]
+ [kube_control_plane]
  ${list_master}


@@ -21,7 +21,7 @@ ${list_etcd}

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane


  [k8s-cluster:vars]
@@ -2,10 +2,10 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

- [kube-master:vars]
+ [kube_control_plane:vars]
  supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ]

  [etcd]

@@ -15,5 +15,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -50,13 +50,13 @@ for name in "${WORKER_NAMES[@]}"; do
  done

  echo ""
- echo "[kube-master]"
+ echo "[kube_control_plane]"
  for name in "${MASTER_NAMES[@]}"; do
  echo "${name}"
  done

  echo ""
- echo "[kube-master:vars]"
+ echo "[kube_control_plane:vars]"
  echo "supplementary_addresses_in_ssl_keys = [ '${API_LB}' ]" # Add LB address to API server certificate
  echo ""
  echo "[etcd]"

@@ -72,5 +72,5 @@ done

  echo ""
  echo "[k8s-cluster:children]"
- echo "kube-master"
+ echo "kube_control_plane"
  echo "kube-node"
@@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {

  metadata = {
  ssh_user = var.ssh_user
- kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+ kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
  depends_on = var.network_id
  use_access_ip = var.use_access_ip
  }

@@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {

  metadata = {
  ssh_user = var.ssh_user
- kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
+ kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
  depends_on = var.network_id
  use_access_ip = var.use_access_ip
  }

@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {

  metadata = {
  ssh_user = var.ssh_user
- kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+ kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
  depends_on = var.network_id
  use_access_ip = var.use_access_ip
  }

@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {

  metadata = {
  ssh_user = var.ssh_user
- kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+ kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
  depends_on = var.network_id
  use_access_ip = var.use_access_ip
  }
@@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" {
  operating_system = var.operating_system
  billing_cycle = var.billing_cycle
  project_id = var.packet_project_id
- tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
+ tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"]
  }

  resource "packet_device" "k8s_master_no_etcd" {

@@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" {
  operating_system = var.operating_system
  billing_cycle = var.billing_cycle
  project_id = var.packet_project_id
- tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master"]
+ tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"]
  }

  resource "packet_device" "k8s_etcd" {
@@ -3,7 +3,7 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

  [etcd]

@@ -13,5 +13,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -3,7 +3,7 @@
  ${connection_strings_master}
  ${connection_strings_worker}

- [kube-master]
+ [kube_control_plane]
  ${list_master}

  [etcd]

@@ -13,5 +13,5 @@ ${list_master}
  ${list_worker}

  [k8s-cluster:children]
- kube-master
+ kube_control_plane
  kube-node
@@ -1,30 +1,30 @@
  ---
  - import_tasks: sync_kube_master_certs.yml
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']

  - import_tasks: sync_kube_node_certs.yml
  when: inventory_hostname in groups['k8s-cluster']

- # Issue admin certs to kube-master hosts
+ # Issue admin certs to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "admin"
  issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_path: "{{ item }}"
- issue_cert_role: kube-master
+ issue_cert_role: kube_control_plane
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_admin_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']

  - name: gen_certs_vault | Set fact about certificate alt names
  set_fact:
  kube_cert_alt_names: >-
  {{
- groups['kube-master'] +
+ groups['kube_control_plane'] +
  ['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
  ['localhost']
  }}

@@ -36,18 +36,18 @@
  when: loadbalancer_apiserver is defined
  run_once: true

- # Issue master components certs to kube-master hosts
+ # Issue master components certs to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "kubernetes"
  issue_cert_alt_names: "{{ kube_cert_alt_names }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_run_once: true
  issue_cert_ip_sans: >-
  [
- {%- for host in groups['kube-master'] -%}
+ {%- for host in groups['kube_control_plane'] -%}
  "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
  {%- if hostvars[host]['ip'] is defined -%}
  "{{ hostvars[host]['ip'] }}",

@@ -61,11 +61,11 @@
  "127.0.0.1","::1","{{ kube_apiserver_ip }}"
  ]
  issue_cert_path: "{{ item }}"
- issue_cert_role: kube-master
+ issue_cert_role: kube_control_plane
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_master_components_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']
  notify: set secret_changed

  # Issue node certs to k8s-cluster nodes

@@ -100,7 +100,7 @@
  with_items: "{{ kube_proxy_certs_needed|d([]) }}"
  when: inventory_hostname in groups['k8s-cluster']

- # Issue front proxy cert to kube-master hosts
+ # Issue front proxy cert to kube_control_plane hosts
  - include_tasks: ../../../vault/tasks/shared/issue_cert.yml
  vars:
  issue_cert_common_name: "front-proxy-client"

@@ -109,10 +109,10 @@
  issue_cert_alt_names: "{{ kube_cert_alt_names }}"
  issue_cert_file_group: "{{ kube_cert_group }}"
  issue_cert_file_owner: kube
- issue_cert_hosts: "{{ groups['kube-master'] }}"
+ issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
  issue_cert_ip_sans: >-
  [
- {%- for host in groups['kube-master'] -%}
+ {%- for host in groups['kube_control_plane'] -%}
  "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
  {%- if hostvars[host]['ip'] is defined -%}
  "{{ hostvars[host]['ip'] }}",

@@ -130,5 +130,5 @@
  issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
  issue_cert_mount_path: "{{ kube_vault_mount_path }}"
  with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
- when: inventory_hostname in groups['kube-master']
+ when: inventory_hostname in groups['kube_control_plane']
  notify: set secret_changed
@@ -29,7 +29,7 @@
  sync_file: "{{ item }}"
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_is_cert: true
  sync_file_owner: kube
  with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]

@@ -49,7 +49,7 @@
  sync_file: front-proxy-ca.pem
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: kube

  - name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem

@@ -61,7 +61,7 @@
  sync_file: "{{ item }}"
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_is_cert: true
  sync_file_owner: kube
  with_items: ["front-proxy-client.pem"]

@@ -81,7 +81,7 @@
  sync_file: ca.pem
  sync_file_dir: "{{ kube_cert_dir }}"
  sync_file_group: "{{ kube_cert_group }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: kube

  - name: sync_kube_master_certs | Unset sync_file_results after ca.pem
@@ -14,7 +14,7 @@
  sync_file_owner: kube
  with_items: "{{ kube_node_cert_list|default([]) }}"

- - name: sync_kube_node_certs | Set facts for kube-master sync_file results
+ - name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
  set_fact:
  kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
@@ -166,16 +166,16 @@ vault_pki_mounts:
  description: "Kubernetes Root CA"
  cert_dir: "{{ kube_cert_dir }}"
  roles:
- - name: kube-master
+ - name: kube_control_plane
- group: kube-master
+ group: kube_control_plane
- password: "{{ lookup('password', credentials_dir + '/vault/kube-master.creds length=15') }}"
+ password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
  policy_rules: default
  role_options:
  allow_any_name: true
  enforce_hostnames: false
  organization: "system:masters"
  - name: front-proxy-client
- group: kube-master
+ group: kube_control_plane
  password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
  policy_rules: default
  role_options:
@@ -51,7 +51,7 @@
  gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
  gen_ca_vault_headers: "{{ vault_headers }}"
  gen_ca_vault_options: "{{ vault_ca_options.vault }}"
- gen_ca_copy_group: "kube-master"
+ gen_ca_copy_group: "kube_control_plane"
  when: >-
  inventory_hostname in groups.vault
  and not vault_cluster_is_initialized
@@ -21,7 +21,7 @@
  vars:
  sync_file: "ca.pem"
  sync_file_dir: "{{ vault_cert_dir }}"
- sync_file_hosts: "{{ groups['kube-master'] }}"
+ sync_file_hosts: "{{ groups['kube_control_plane'] }}"
  sync_file_owner: vault
  sync_file_group: root
  sync_file_is_cert: false

@@ -35,7 +35,7 @@
  gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
  gen_ca_vault_headers: "{{ vault_headers }}"
  gen_ca_vault_options: "{{ vault_ca_options.kube }}"
- gen_ca_copy_group: "kube-master"
+ gen_ca_copy_group: "kube_control_plane"
  when: inventory_hostname in groups.vault

  - include_tasks: ../shared/auth_backend.yml
@@ -5,7 +5,7 @@
  The inventory is composed of 3 groups:

  * **kube-node** : list of kubernetes nodes where the pods will run.
- * **kube-master** : list of servers where kubernetes master components (apiserver, scheduler, controller) will run.
+ * **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run.
  * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose.

  Note: do not modify the children of _k8s-cluster_, like putting

@@ -18,9 +18,9 @@ k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd

  When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads.
  If you want it a standalone, make sure those groups do not intersect.
- If you want the server to act both as master and node, the server must be defined
+ If you want the server to act both as control-plane and node, the server must be defined
- on both groups _kube-master_ and _kube-node_. If you want a standalone and
+ on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and
- unschedulable master, the server must be defined only in the _kube-master_ and
+ unschedulable master, the server must be defined only in the _kube_control_plane_ and
  not _kube-node_.

  There are also two special groups:

@@ -40,7 +40,7 @@ node4 ansible_host=95.54.0.15 ip=10.3.0.4
  node5 ansible_host=95.54.0.16 ip=10.3.0.5
  node6 ansible_host=95.54.0.17 ip=10.3.0.6

- [kube-master]
+ [kube_control_plane]
  node1
  node2

@@ -58,7 +58,7 @@ node6

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
  ```

  ## Group vars and overriding variables precedence
@@ -35,11 +35,11 @@ This will produce an inventory that is passed into Ansible that looks like the f
  ],
  "k8s-cluster": {
  "children": [
- "kube-master",
+ "kube_control_plane",
  "kube-node"
  ]
  },
- "kube-master": [
+ "kube_control_plane": [
  "ip-172-31-3-xxx.us-east-2.compute.internal"
  ],
  "kube-node": [

@@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f
  Guide:

  - Create instances in AWS as needed.
- - Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
+ - Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd`
  - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
  - Set the following AWS credentials and info as environment variables in your terminal:
@@ -122,7 +122,7 @@ recommended here:
  You need to edit your inventory and add:

  * `calico-rr` group with nodes in it. `calico-rr` can be combined with
- `kube-node` and/or `kube-master`. `calico-rr` group also must be a child
+ `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child
  group of `k8s-cluster` group.
  * `cluster_id` by route reflector node/group (see details
  [here](https://hub.docker.com/r/calico/routereflector/))

@@ -138,7 +138,7 @@ node3 ansible_ssh_host=10.210.1.13 ip=10.210.1.13
  node4 ansible_ssh_host=10.210.1.14 ip=10.210.1.14
  node5 ansible_ssh_host=10.210.1.15 ip=10.210.1.15

- [kube-master]
+ [kube_control_plane]
  node2
  node3

@@ -155,7 +155,7 @@ node5

  [k8s-cluster:children]
  kube-node
- kube-master
+ kube_control_plane
  calico-rr

  [calico-rr]
@@ -8,7 +8,7 @@ Kubespray supports several download/upload modes. The default is:

  There is also a "pull once, push many" mode as well:

- * Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube-master`.
+ * Setting ``download_run_once: True`` will make kubespray download container images and binaries only once and then push them to the cluster nodes. The default download delegate node is the first `kube_control_plane`.
  * Set ``download_localhost: True`` to make localhost the download delegate. This can be useful if cluster nodes cannot access external addresses. To use this requires that docker is installed and running on the ansible master and that the current user is either in the docker group or can do passwordless sudo, to be able to access docker.

  NOTE: When `download_run_once` is true and `download_localhost` is false, all downloads will be done on the delegate node, including downloads for container images that are not required on that node. As a consequence, the storage required on that node will probably be more than if download_run_once was false, because all images will be loaded into the docker instance on that node, instead of just the images required for that node.
@@ -76,16 +76,16 @@ var in inventory.

  ## Connecting to Kubernetes

- By default, Kubespray configures kube-master hosts with insecure access to
+ By default, Kubespray configures kube_control_plane hosts with insecure access to
  kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
  because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
- generated will point to localhost (on kube-masters) and kube-node hosts will
+ generated will point to localhost (on kube_control_planes) and kube-node hosts will
  connect either to a localhost nginx proxy or to a loadbalancer if configured.
  More details on this process are in the [HA guide](/docs/ha-mode.md).

  Kubespray permits connecting to the cluster remotely on any IP of any
- kube-master host on port 6443 by default. However, this requires
+ kube_control_plane host on port 6443 by default. However, this requires
- authentication. One can get a kubeconfig from kube-master hosts
+ authentication. One can get a kubeconfig from kube_control_plane hosts
  (see [below](#accessing-kubernetes-api)) or connect with a [username and password](/docs/vars.md#user-accounts).

  For more information on kubeconfig and accessing a Kubernetes cluster, refer to

@@ -119,7 +119,7 @@ kubectl proxy

  ## Accessing Kubernetes API

- The main client of Kubernetes is `kubectl`. It is installed on each kube-master
+ The main client of Kubernetes is `kubectl`. It is installed on each kube_control_plane
  host and can optionally be configured on your ansible host by setting
  `kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
@@ -32,7 +32,7 @@ If you choose to NOT use the local internal loadbalancer, you will need to
  configure your own loadbalancer to achieve HA. Note that deploying a
  loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
  By default, it only configures a non-HA endpoint, which points to the
- `access_ip` or IP address of the first server node in the `kube-master` group.
+ `access_ip` or IP address of the first server node in the `kube_control_plane` group.
  It can also configure clients to use endpoints for a given loadbalancer type.
  The following diagram shows how traffic to the apiserver is directed.

@@ -102,8 +102,8 @@ exclusive to `loadbalancer_apiserver_localhost`.

  Access API endpoints are evaluated automatically, as the following:

- | Endpoint type | kube-master | non-master | external |
+ | Endpoint type | kube_control_plane | non-master | external |
- |------------------------------|------------------|-------------------------|-----------------------|
+ |------------------------------|--------------------|-------------------------|-----------------------|
  | Local LB (default) | `https://bip:sp` | `https://lc:nsp` | `https://m[0].aip:sp` |
  | Local LB + Unmanaged here LB | `https://bip:sp` | `https://lc:nsp` | `https://ext` |
  | External LB, no internal | `https://bip:sp` | `<https://lb:lp>` | `https://lb:lp` |

@@ -111,7 +111,7 @@ Access API endpoints are evaluated automatically, as the following:

  Where:

- * `m[0]` - the first node in the `kube-master` group;
+ * `m[0]` - the first node in the `kube_control_plane` group;
  * `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
  * `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
  * `lc` - localhost;
@@ -62,7 +62,7 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr
  kubemaster
  kubemaster-ha

- [kube-master:children]
+ [kube_control_plane:children]
  kubemaster
  kubemaster-ha
@@ -39,7 +39,7 @@ For a large scaled deployments, consider the following configuration changes:

  * Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
  from host/network interruption much quicker with calico-rr. Note that
- calico-rr role must be on a host without kube-master or kube-node role (but
+ calico-rr role must be on a host without kube_control_plane or kube-node role (but
  etcd role is okay).

  * Check out the
@ -2,9 +2,9 @@
|
||||||
|
|
||||||
Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
|
Modified from [comments in #3471](https://github.com/kubernetes-sigs/kubespray/issues/3471#issuecomment-530036084)
|
||||||
|
|
||||||
## Limitation: Removal of first kube-master and etcd-master
|
## Limitation: Removal of first kube_control_plane and etcd-master
|
||||||
|
|
||||||
Currently you can't remove the first node in your kube-master and etcd-master list. If you still want to remove this node you have to:
|
Currently you can't remove the first node in your kube_control_plane and etcd-master list. If you still want to remove this node you have to:
|
||||||
|
|
||||||
### 1) Change order of current masters
|
### 1) Change order of current masters
|
||||||
|
|
||||||
|
@ -12,7 +12,7 @@ Modify the order of your master list by pushing your first entry to any other po
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
children:
|
children:
|
||||||
kube-master:
|
kube_control_plane:
|
||||||
hosts:
|
hosts:
|
||||||
node-1:
|
node-1:
|
||||||
node-2:
|
node-2:
|
||||||
|
@ -33,7 +33,7 @@ change your inventory to:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
children:
|
children:
|
||||||
kube-master:
|
kube_control_plane:
|
||||||
hosts:
|
hosts:
|
||||||
node-2:
|
node-2:
|
||||||
node-3:
|
node-3:
|
||||||
|
@ -103,10 +103,10 @@ You need to make sure there are always an odd number of etcd nodes in the cluste
|
||||||
|
|
||||||
### 1) Add the new node running cluster.yml
|
### 1) Add the new node running cluster.yml
|
||||||
|
|
||||||
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`.
|
Update the inventory and run `cluster.yml` passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`.
|
||||||
If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove him first using `remove-node.yml`.
|
If the node you want to add as an etcd node is already a worker or master node in your cluster, you have to remove him first using `remove-node.yml`.
|
||||||
|
|
||||||
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube-master -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
|
Run `upgrade-cluster.yml` also passing `--limit=etcd,kube_control_plane -e ignore_assert_errors=yes`. This is necessary to update all etcd configuration in the cluster.
|
||||||
|
|
||||||
At this point, you will have an even number of nodes.
|
At this point, you will have an even number of nodes.
|
||||||
Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.
|
Everything should still be working, and you should only have problems if the cluster decides to elect a new etcd leader before you remove a node.
|
||||||
|
|
|
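For example, assuming a standard inventory copy at `inventory/mycluster/hosts.yaml` (the path is illustrative and depends on your layout), the two steps described above could be run as:

```ShellSession
# Join the new member, skipping the odd-sized-etcd assertion (inventory path is an example)
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml \
  --limit=etcd,kube_control_plane -e ignore_assert_errors=yes

# Then refresh the etcd configuration across the whole control plane
ansible-playbook -i inventory/mycluster/hosts.yaml upgrade-cluster.yml \
  --limit=etcd,kube_control_plane -e ignore_assert_errors=yes
```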
@@ -5,8 +5,8 @@ To recover from broken nodes in the control plane use the "recover\-control\-pla

 * Backup what you can
 * Provision new nodes to replace the broken ones
-* Place the surviving nodes of the control plane first in the "etcd" and "kube-master" groups
-* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube-master" groups
+* Place the surviving nodes of the control plane first in the "etcd" and "kube\_control\_plane" groups
+* Add the new nodes below the surviving control plane nodes in the "etcd" and "kube\_control\_plane" groups

 Examples of what broken means in this context:

@@ -20,9 +20,9 @@ __Note that you need at least one functional node to be able to recover using th
 ## Runbook

 * Move any broken etcd nodes into the "broken\_etcd" group, make sure the "etcd\_member\_name" variable is set.
-* Move any broken master nodes into the "broken\_kube-master" group.
+* Move any broken master nodes into the "broken\_kube\_control\_plane" group.

-Then run the playbook with ```--limit etcd,kube-master``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.
+Then run the playbook with ```--limit etcd,kube_control_plane``` and increase the number of ETCD retries by setting ```-e etcd_retries=10``` or something even larger. The amount of retries required is difficult to predict.

 When finished you should have a fully working control plane again.
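As a concrete sketch of that runbook step, assuming the recover-control-plane.yml playbook and an inventory at `inventory/mycluster/hosts.yaml` (the inventory path is only an example), the invocation could look like:

```ShellSession
ansible-playbook -i inventory/mycluster/hosts.yaml recover-control-plane.yml \
  --limit etcd,kube_control_plane -e etcd_retries=10
```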
@@ -3,10 +3,10 @@
 There are four node layout types: `default`, `separate`, `ha`, and `scale`.

 `default` is a non-HA two nodes setup with one separate `kube-node`
-and the `etcd` group merged with the `kube-master`.
+and the `etcd` group merged with the `kube_control_plane`.

 `separate` layout is when there is only node of each type, which includes
-a kube-master, kube-node, and etcd cluster member.
+a kube_control_plane, kube-node, and etcd cluster member.

 `ha` layout consists of two etcd nodes, two masters and a single worker node,
 with role intersection.

@@ -41,7 +41,7 @@ The var ```-e upgrade_cluster_setup=true``` is needed to be set in order to migr
 Kubespray also supports cordon, drain and uncordoning of nodes when performing
 a cluster upgrade. There is a separate playbook used for this purpose. It is
 important to note that upgrade-cluster.yml can only be used for upgrading an
-existing cluster. That means there must be at least 1 kube-master already
+existing cluster. That means there must be at least 1 kube_control_plane already
 deployed.

 ```ShellSession

@@ -36,7 +36,7 @@ Some variables of note include:
 * *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
   and access_ip are undefined
 * *loadbalancer_apiserver* - If defined, all hosts will connect to this
-  address instead of localhost for kube-masters and kube-master[0] for
+  address instead of localhost for kube_control_planes and kube_control_plane[0] for
   kube-nodes. See more details in the
   [HA guide](/docs/ha-mode.md).
 * *loadbalancer_apiserver_localhost* - makes all hosts to connect to

@@ -1,5 +1,5 @@
 ---
-- hosts: kube-node:kube-master
+- hosts: kube-node:kube_control_plane
   tasks:
     - name: Remove old cloud provider config
       file:

@@ -7,7 +7,7 @@
         state: absent
       with_items:
         - /etc/kubernetes/cloud_config
-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   tasks:
     - name: Include kubespray-default variables
       include_vars: ../roles/kubespray-defaults/defaults/main.yaml

@@ -34,7 +34,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }

 - name: Handle upgrades to master components first to maintain backwards compat.
-  hosts: kube-master
+  hosts: kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: 1
   roles:

@@ -47,7 +47,7 @@
     - { role: upgrade/post-upgrade, tags: post-upgrade }

 - name: Finally handle worker upgrades, based on given batch size
-  hosts: kube-node:!kube-master
+  hosts: kube-node:!kube_control_plane
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   serial: "{{ serial | default('20%') }}"
   roles:

@@ -1,6 +1,6 @@
 node1 ansible_connection=local local_release_dir={{ansible_env.HOME}}/releases

-[kube-master]
+[kube_control_plane]
 node1

 [etcd]

@@ -11,5 +11,5 @@ node1

 [k8s-cluster:children]
 kube-node
-kube-master
+kube_control_plane
 calico-rr

@@ -13,7 +13,7 @@
 # [bastion]
 # bastion ansible_host=x.x.x.x ansible_user=some_user

-[kube-master]
+[kube_control_plane]
 # node1
 # node2
 # node3

@@ -33,6 +33,6 @@
 [calico-rr]

 [k8s-cluster:children]
-kube-master
+kube_control_plane
 kube-node
 calico-rr

@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml

+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"
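The play added in the hunk above is a compatibility shim for un-migrated inventories: hosts still listed under the legacy `kube-master` group are added to `kube_control_plane` at runtime via `group_by`, so no inventory edit is strictly required. A minimal sketch of what that means in practice (the inventory path and host name are illustrative):

```ShellSession
# An inventory written before the rename may still declare:
#   [kube-master]
#   node1
# The shim maps node1 into kube_control_plane before any other play runs
ansible-playbook -i inventory/legacy/hosts.ini recover-control-plane.yml -b
```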
@@ -15,7 +24,7 @@
     - { role: kubespray-defaults}
     - { role: recover_control_plane/etcd }

-- hosts: "{{ groups['kube-master'] | first }}"
+- hosts: "{{ groups['kube_control_plane'] | first }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}

@@ -23,7 +32,7 @@

 - include: cluster.yml

-- hosts: "{{ groups['kube-master'] }}"
+- hosts: "{{ groups['kube_control_plane'] }}"
   environment: "{{ proxy_disable_env }}"
   roles:
     - { role: kubespray-defaults}

@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml

+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"

@@ -17,7 +26,7 @@
         msg: "Delete nodes confirmation failed"
       when: delete_nodes_confirmation != "yes"

-- hosts: kube-master[0]
+- hosts: kube_control_plane[0]
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -35,7 +44,7 @@
     - { role: reset, tags: reset, when: reset_nodes|default(True)|bool }

 # Currently cannot remove first master or etcd
-- hosts: "{{ node | default('kube-master[1:]:etcd[1:]') }}"
+- hosts: "{{ node | default('kube_control_plane[1:]:etcd[1:]') }}"
   gather_facts: no
   environment: "{{ proxy_disable_env }}"
   roles:

@@ -2,6 +2,15 @@
 - name: Check ansible version
   import_playbook: ansible_version.yml

+- name: Add kube-master nodes to kube_control_plane
+  # This is for old inventory which contains kube-master instead of kube_control_plane
+  hosts: kube-master
+  gather_facts: false
+  tasks:
+    - name: add nodes to kube_control_plane group
+      group_by:
+        key: 'kube_control_plane'
+
 - hosts: bastion[0]
   gather_facts: False
   environment: "{{ proxy_disable_env }}"

@@ -12,7 +12,7 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:

@@ -12,25 +12,25 @@ platforms:
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos7
     box: centos/7
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: centos8
     box: centos/8
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
   - name: fedora
     box: fedora/33-cloud-base
     cpus: 2
     memory: 1024
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:

@@ -15,14 +15,14 @@ platforms:
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
   - name: ubuntu20
     box: generic/ubuntu2004
     cpus: 1
     memory: 1024
     nested: true
     groups:
-      - kube-master
+      - kube_control_plane
 provisioner:
   name: ansible
   env:
@@ -30,7 +30,7 @@ download_container: true
 # if this is set to true, uses the localhost for download_run_once mode
 # (requires docker and sudo to access docker). You may want this option for
 # local caching of docker images or for Flatcar Container Linux by Kinvolk cluster nodes.
-# Otherwise, uses the first node in the kube-master group to store images
+# Otherwise, uses the first node in the kube_control_plane group to store images
 # in the download_run_once mode.
 download_localhost: false

@@ -42,8 +42,8 @@ download_always_pull: false
 # SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
 download_validate_certs: true

-# Use the first kube-master if download_localhost is not set
-download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
+# Use the first kube_control_plane if download_localhost is not set
+download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube_control_plane'][0] }}{% endif %}"

 # Arch of Docker images and needed packages
 image_arch: "{{host_architecture | default('amd64')}}"

@@ -733,7 +733,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-      - kube-master
+      - kube_control_plane

   crictl:
     file: true

@@ -883,7 +883,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-      - kube-master
+      - kube_control_plane

   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"

@@ -973,7 +973,7 @@ downloads:
     tag: "{{ coredns_image_tag }}"
     sha256: "{{ coredns_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

   nodelocaldns:
     enabled: "{{ enable_nodelocaldns }}"

@@ -991,7 +991,7 @@ downloads:
     tag: "{{ dnsautoscaler_image_tag }}"
     sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

   testbox:
     enabled: false

@@ -1011,7 +1011,7 @@ downloads:
     owner: "root"
     mode: "0755"
     groups:
-      - kube-master
+      - kube_control_plane

   registry:
     enabled: "{{ registry_enabled }}"

@@ -1038,7 +1038,7 @@ downloads:
     tag: "{{ metrics_server_image_tag }}"
     sha256: "{{ metrics_server_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

   addon_resizer:
     # Currently addon_resizer is only used by metrics server

@@ -1048,7 +1048,7 @@ downloads:
     tag: "{{ addon_resizer_image_tag }}"
     sha256: "{{ addon_resizer_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

   local_volume_provisioner:
     enabled: "{{ local_volume_provisioner_enabled }}"

@@ -1219,7 +1219,7 @@ downloads:
     tag: "{{ dashboard_image_tag }}"
     sha256: "{{ dashboard_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

   dashboard_metrics_scrapper:
     enabled: "{{ dashboard_enabled }}"

@@ -1228,7 +1228,7 @@ downloads:
     tag: "{{ dashboard_metrics_scraper_tag }}"
     sha256: "{{ dashboard_digest_checksum|default(None) }}"
     groups:
-      - kube-master
+      - kube_control_plane

 download_defaults:
   container: false
@@ -18,7 +18,7 @@
   include_tasks: prep_kubeadm_images.yml
   when:
     - not skip_downloads|default(false)
-    - inventory_hostname in groups['kube-master']
+    - inventory_hostname in groups['kube_control_plane']
   tags:
     - download
     - upload

@@ -6,7 +6,7 @@
   ignore_errors: true
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:

@@ -17,7 +17,7 @@
     state: absent
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - createdby_annotation.stdout != 'kubespray'

 - name: Kubernetes Apps | Delete kubeadm Kube-DNS service

@@ -29,4 +29,4 @@
     state: absent
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

@@ -20,7 +20,7 @@
     clusterIP: "{{ skydns_server }}"
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns

@@ -38,6 +38,6 @@
     coredns_ordinal_suffix: "-secondary"
   when:
     - dns_mode == 'coredns_dual'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns

@@ -6,7 +6,7 @@
   with_items:
     - { file: dashboard.yml, type: deploy, name: kubernetes-dashboard }
   register: manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Start dashboard
   kube:

@@ -17,4 +17,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]

@@ -9,12 +9,12 @@
   until: result.status == 200
   retries: 20
   delay: 1
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Cleanup DNS
   import_tasks: cleanup_dns.yml
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade
     - coredns

@@ -24,7 +24,7 @@
   import_tasks: "coredns.yml"
   when:
     - dns_mode in ['coredns', 'coredns_dual']
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - coredns

@@ -32,7 +32,7 @@
   import_tasks: "nodelocaldns.yml"
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns

@@ -50,7 +50,7 @@
     - "{{ nodelocaldns_manifests.results | default({}) }}"
   when:
     - dns_mode != 'none'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   register: resource_result
   until: resource_result is succeeded

@@ -28,7 +28,7 @@
   with_items: "{{ netchecker_templates }}"
   register: manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Start Netchecker Resources
   kube:

@@ -39,4 +39,4 @@
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
-  when: inventory_hostname == groups['kube-master'][0] and not item is skipped
+  when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

@@ -10,7 +10,7 @@
     secondaryclusterIP: "{{ skydns_server_secondary }}"
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns
     - coredns

@@ -39,7 +39,7 @@
     {%- endif -%}
   when:
     - enable_nodelocaldns
-    - inventory_hostname == groups['kube-master'] | first
+    - inventory_hostname == groups['kube_control_plane'] | first
   tags:
     - nodelocaldns
     - coredns

@@ -7,7 +7,7 @@
   template:
     src: controller-manager-config.yml.j2
     dest: "{{ kube_config_dir }}/controller-manager-config.yml"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci

 - name: "OCI Cloud Controller | Slurp Configuration"

@@ -18,14 +18,14 @@
 - name: "OCI Cloud Controller | Encode Configuration"
   set_fact:
     controller_manager_config_base64: "{{ controller_manager_config.content }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci

 - name: "OCI Cloud Controller | Generate Manifests"
   template:
     src: oci-cloud-provider.yml.j2
     dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci

 - name: "OCI Cloud Controller | Apply Manifests"

@@ -33,5 +33,5 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
     state: latest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: oci

@@ -9,14 +9,14 @@
   until: result.status == 200
   retries: 10
   delay: 6
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Check AppArmor status
   command: which apparmor_parser
   register: apparmor_status
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   failed_when: false

 - name: Kubernetes Apps | Set apparmor_enabled

@@ -24,7 +24,7 @@
     apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Render templates for PodSecurityPolicy
   template:

@@ -37,7 +37,7 @@
     - {file: psp-crb.yml, type: rolebinding, name: psp-crb}
   when:
     - podsecuritypolicy_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
   kube:

@@ -52,7 +52,7 @@
   delay: 6
   with_items: "{{ psp_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -64,7 +64,7 @@
   register: node_crb_manifest
   when:
     - rbac_enabled
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:

@@ -80,7 +80,7 @@
   when:
     - rbac_enabled
    - node_crb_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kubernetes Apps | Add webhook ClusterRole that grants access to proxy, stats, log, spec, and metrics on a kubelet
   template:

@@ -90,7 +90,7 @@
   when:
     - rbac_enabled
     - kubelet_authorization_mode_webhook
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook

 - name: Apply webhook ClusterRole

@@ -104,7 +104,7 @@
     - rbac_enabled
     - kubelet_authorization_mode_webhook
     - node_webhook_cr_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook

 - name: Kubernetes Apps | Add ClusterRoleBinding for system:nodes to webhook ClusterRole

@@ -115,7 +115,7 @@
   when:
     - rbac_enabled
     - kubelet_authorization_mode_webhook
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook

 - name: Grant system:nodes the webhook ClusterRole

@@ -129,7 +129,7 @@
     - rbac_enabled
     - kubelet_authorization_mode_webhook
     - node_webhook_crb_manifest.changed
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags: node-webhook

 - include_tasks: oci.yml

@@ -140,7 +140,7 @@

 - name: PriorityClass | Copy k8s-cluster-critical-pc.yml file
   copy: src=k8s-cluster-critical-pc.yml dest={{ kube_config_dir }}/k8s-cluster-critical-pc.yml
-  when: inventory_hostname == groups['kube-master']|last
+  when: inventory_hostname == groups['kube_control_plane']|last

 - name: PriorityClass | Create k8s-cluster-critical
   kube:

@@ -149,4 +149,4 @@
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest
-  when: inventory_hostname == groups['kube-master']|last
+  when: inventory_hostname == groups['kube_control_plane']|last

@@ -6,7 +6,7 @@
   when:
     - cloud_provider is defined
     - cloud_provider == 'oci'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Apply OCI RBAC
   kube:

@@ -15,4 +15,4 @@
   when:
     - cloud_provider is defined
     - cloud_provider == 'oci'
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

@@ -38,7 +38,7 @@
     - { name: k8s-device-plugin-nvidia-daemonset, file: k8s-device-plugin-nvidia-daemonset.yml, type: daemonset }
   register: container_engine_accelerator_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container
+    - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container

 - name: Container Engine Acceleration Nvidia GPU | Apply manifests for nvidia accelerators
   kube:

@@ -51,4 +51,4 @@
   with_items:
     - "{{ container_engine_accelerator_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
+    - inventory_hostname == groups['kube_control_plane'][0] and nvidia_driver_install_container and nvidia_driver_install_supported
@@ -6,7 +6,7 @@
     dest: "{{ kube_config_dir }}/runtimeclass-crun.yml"
     mode: "0664"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: crun | Apply manifests
   kube:

@@ -16,4 +16,4 @@
     filename: "{{ kube_config_dir }}/runtimeclass-crun.yml"
     state: "latest"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

@@ -20,7 +20,7 @@
   with_items: "{{ kata_containers_templates }}"
   register: kata_containers_manifests
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: Kata Containers | Apply manifests
   kube:

@@ -31,4 +31,4 @@
     state: "latest"
   with_items: "{{ kata_containers_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

@@ -9,7 +9,7 @@
     - {name: aws-ebs-csi-controllerservice, file: aws-ebs-csi-controllerservice.yml}
     - {name: aws-ebs-csi-nodeservice, file: aws-ebs-csi-nodeservice.yml}
   register: aws_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: aws-ebs-csi-driver

 - name: AWS CSI Driver | Apply Manifests

@@ -20,7 +20,7 @@
   with_items:
     - "{{ aws_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
    - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"

@@ -8,14 +8,14 @@
     dest: "{{ kube_config_dir }}/azure_csi_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver

 - name: Azure CSI Driver | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/azure_csi_cloud_config"
   register: cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver

 - name: Azure CSI Driver | Generate Manifests

@@ -30,7 +30,7 @@
     - {name: azure-csi-azuredisk-node, file: azure-csi-azuredisk-node.yml}
     - {name: azure-csi-node-info-crd.yml.j2, file: azure-csi-node-info-crd.yml}
   register: azure_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: azure-csi-driver

 - name: Azure CSI Driver | Apply Manifests

@@ -41,7 +41,7 @@
   with_items:
     - "{{ azure_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -20,14 +20,14 @@
     dest: "{{ kube_config_dir }}/cinder_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver

 - name: Cinder CSI Driver | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/cinder_cloud_config"
   register: cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver

 - name: Cinder CSI Driver | Generate Manifests

@@ -43,7 +43,7 @@
     - {name: cinder-csi-nodeplugin, file: cinder-csi-nodeplugin.yml}
     - {name: cinder-csi-poddisruptionbudget, file: cinder-csi-poddisruptionbudget.yml}
   register: cinder_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: cinder-csi-driver

 - name: Cinder CSI Driver | Apply Manifests

@@ -54,7 +54,7 @@
   with_items:
     - "{{ cinder_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"

@@ -8,7 +8,7 @@
     - {name: volumesnapshotcontents, file: volumesnapshotcontents.yml}
     - {name: volumesnapshots, file: volumesnapshots.yml}
   register: csi_crd_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: csi-driver

 - name: CSI CRD | Apply Manifests

@@ -20,7 +20,7 @@
   with_items:
     - "{{ csi_crd_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"

@@ -11,14 +11,14 @@
     dest: "{{ kube_config_dir }}/cloud-sa.json"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver

 - name: GCP PD CSI Driver | Get base64 cloud-sa.json
   slurp:
     src: "{{ kube_config_dir }}/cloud-sa.json"
   register: gcp_cred_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver

 - name: GCP PD CSI Driver | Generate Manifests

@@ -31,7 +31,7 @@
     - {name: gcp-pd-csi-controller, file: gcp-pd-csi-controller.yml}
     - {name: gcp-pd-csi-node, file: gcp-pd-csi-node.yml}
   register: gcp_pd_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: gcp-pd-csi-driver

 - name: GCP PD CSI Driver | Apply Manifests

@@ -42,7 +42,7 @@
   with_items:
     - "{{ gcp_pd_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -9,7 +9,7 @@
     mode: 0640
   with_items:
     - vsphere-csi-cloud-config
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: vsphere-csi-driver

 - name: vSphere CSI Driver | Generate Manifests

@@ -21,13 +21,13 @@
     - vsphere-csi-controller-ss.yml
     - vsphere-csi-node.yml
   register: vsphere_csi_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: vsphere-csi-driver

 - name: vSphere CSI Driver | Generate a CSI secret manifest
   command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: vsphere_csi_secret_manifest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
   tags: vsphere-csi-driver

@@ -35,7 +35,7 @@
   command:
     cmd: "{{ bin_dir }}/kubectl apply -f -"
     stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   no_log: true
   tags: vsphere-csi-driver

@@ -47,7 +47,7 @@
   with_items:
     - "{{ vsphere_csi_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item }}"

@@ -6,7 +6,7 @@ dependencies:
       - cloud_provider == "external"
       - external_cloud_provider is defined
       - external_cloud_provider == "openstack"
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
       - external-cloud-controller
       - external-openstack

@@ -16,7 +16,7 @@ dependencies:
       - cloud_provider == "external"
       - external_cloud_provider is defined
       - external_cloud_provider == "vsphere"
-      - inventory_hostname == groups['kube-master'][0]
+      - inventory_hostname == groups['kube_control_plane'][0]
     tags:
      - external-cloud-controller
      - external-vsphere

@@ -20,14 +20,14 @@
     dest: "{{ kube_config_dir }}/external_openstack_cloud_config"
     group: "{{ kube_cert_group }}"
     mode: 0640
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack

 - name: External OpenStack Cloud Controller | Get base64 cloud-config
   slurp:
     src: "{{ kube_config_dir }}/external_openstack_cloud_config"
   register: external_openstack_cloud_config_secret
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack

 - name: External OpenStack Cloud Controller | Generate Manifests

@@ -42,7 +42,7 @@
     - {name: external-openstack-cloud-controller-manager-role-bindings, file: external-openstack-cloud-controller-manager-role-bindings.yml}
     - {name: external-openstack-cloud-controller-manager-ds, file: external-openstack-cloud-controller-manager-ds.yml}
   register: external_openstack_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-openstack

 - name: External OpenStack Cloud Controller | Apply Manifests

@@ -53,7 +53,7 @@
   with_items:
     - "{{ external_openstack_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item.file }}"
@@ -9,7 +9,7 @@
     mode: 0640
   with_items:
     - external-vsphere-cpi-cloud-config
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere

 - name: External vSphere Cloud Controller | Generate Manifests

@@ -22,20 +22,20 @@
     - external-vsphere-cloud-controller-manager-role-bindings.yml
     - external-vsphere-cloud-controller-manager-ds.yml
   register: external_vsphere_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere

 - name: External vSphere Cloud Provider Interface | Create a CPI configMap manifest
   command: "{{ bin_dir }}/kubectl create configmap cloud-config --from-file=vsphere.conf={{ kube_config_dir }}/external-vsphere-cpi-cloud-config -n kube-system --dry-run --save-config -o yaml"
   register: external_vsphere_configmap_manifest
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere

 - name: External vSphere Cloud Provider Interface | Apply a CPI configMap manifest
   command:
     cmd: "{{ bin_dir }}/kubectl apply -f -"
     stdin: "{{ external_vsphere_configmap_manifest.stdout }}"
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]
   tags: external-vsphere

 - name: External vSphere Cloud Controller | Apply Manifests

@@ -46,7 +46,7 @@
   with_items:
     - "{{ external_vsphere_manifests.results }}"
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
     - not item is skipped
   loop_control:
     label: "{{ item.item }}"

@@ -5,7 +5,7 @@
     path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
     state: absent
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade

@@ -14,7 +14,7 @@
     {{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade

@@ -23,7 +23,7 @@
     {{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
   ignore_errors: yes
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]
   tags:
     - upgrade

@@ -35,7 +35,7 @@
     group: root
     mode: 0755
   when:
-    - inventory_hostname == groups['kube-master'][0]
+    - inventory_hostname == groups['kube_control_plane'][0]

 - name: CephFS Provisioner | Templates list
   set_fact:

@@ -65,7 +65,7 @@
     dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
   with_items: "{{ cephfs_provisioner_templates }}"
   register: cephfs_provisioner_manifests
-  when: inventory_hostname == groups['kube-master'][0]
+  when: inventory_hostname == groups['kube_control_plane'][0]

 - name: CephFS Provisioner | Apply manifests
   kube:

@@ -76,4 +76,4 @@
     filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ cephfs_provisioner_manifests.results }}"
|
with_items: "{{ cephfs_provisioner_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Local Path Provisioner | Create claim root dir
|
- name: Local Path Provisioner | Create claim root dir
|
||||||
file:
|
file:
|
||||||
|
@ -42,7 +42,7 @@
|
||||||
dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
|
dest: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.file }}"
|
||||||
with_items: "{{ local_path_provisioner_templates }}"
|
with_items: "{{ local_path_provisioner_templates }}"
|
||||||
register: local_path_provisioner_manifests
|
register: local_path_provisioner_manifests
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Local Path Provisioner | Apply manifests
|
- name: Local Path Provisioner | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -53,4 +53,4 @@
|
||||||
filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/addons/local_path_provisioner/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ local_path_provisioner_manifests.results }}"
|
with_items: "{{ local_path_provisioner_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -42,7 +42,7 @@
|
||||||
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
|
dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
|
||||||
with_items: "{{ local_volume_provisioner_templates }}"
|
with_items: "{{ local_volume_provisioner_templates }}"
|
||||||
register: local_volume_provisioner_manifests
|
register: local_volume_provisioner_manifests
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Local Volume Provisioner | Apply manifests
|
- name: Local Volume Provisioner | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -53,6 +53,6 @@
|
||||||
filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ local_volume_provisioner_manifests.results }}"
|
with_items: "{{ local_volume_provisioner_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
loop_control:
|
loop_control:
|
||||||
label: "{{ item.item.file }}"
|
label: "{{ item.item.file }}"
|
||||||
|
|
|
@ -3,7 +3,7 @@ dependencies:
|
||||||
- role: kubernetes-apps/external_provisioner/local_volume_provisioner
|
- role: kubernetes-apps/external_provisioner/local_volume_provisioner
|
||||||
when:
|
when:
|
||||||
- local_volume_provisioner_enabled
|
- local_volume_provisioner_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- apps
|
- apps
|
||||||
- local-volume-provisioner
|
- local-volume-provisioner
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
|
path: "{{ kube_config_dir }}/addons/rbd_provisioner"
|
||||||
state: absent
|
state: absent
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@
|
||||||
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
|
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -23,7 +23,7 @@
|
||||||
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
|
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -35,7 +35,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: RBD Provisioner | Templates list
|
- name: RBD Provisioner | Templates list
|
||||||
set_fact:
|
set_fact:
|
||||||
|
@ -65,7 +65,7 @@
|
||||||
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
|
dest: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.file }}"
|
||||||
with_items: "{{ rbd_provisioner_templates }}"
|
with_items: "{{ rbd_provisioner_templates }}"
|
||||||
register: rbd_provisioner_manifests
|
register: rbd_provisioner_manifests
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: RBD Provisioner | Apply manifests
|
- name: RBD Provisioner | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -76,4 +76,4 @@
|
||||||
filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/addons/rbd_provisioner/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ rbd_provisioner_manifests.results }}"
|
with_items: "{{ rbd_provisioner_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -20,7 +20,7 @@
|
||||||
- { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy }
|
- { name: alb-ingress-deploy, file: alb-ingress-deploy.yml, type: deploy }
|
||||||
register: alb_ingress_manifests
|
register: alb_ingress_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: ALB Ingress Controller | Apply manifests
|
- name: ALB Ingress Controller | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -32,4 +32,4 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ alb_ingress_manifests.results }}"
|
with_items: "{{ alb_ingress_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Ambassador | Templates list
|
- name: Ambassador | Templates list
|
||||||
set_fact:
|
set_fact:
|
||||||
|
@ -29,7 +29,7 @@
|
||||||
loop: "{{ ingress_ambassador_templates }}"
|
loop: "{{ ingress_ambassador_templates }}"
|
||||||
register: ingress_ambassador_manifests
|
register: ingress_ambassador_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Ambassador | Apply manifests
|
- name: Ambassador | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -41,7 +41,7 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
loop: "{{ ingress_ambassador_manifests.results }}"
|
loop: "{{ ingress_ambassador_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
# load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded
|
# load the AmbassadorInstallation _after_ the CustomResourceDefinition has been loaded
|
||||||
|
|
||||||
|
@ -57,7 +57,7 @@
|
||||||
loop: "{{ ingress_ambassador_cr_templates }}"
|
loop: "{{ ingress_ambassador_cr_templates }}"
|
||||||
register: ingress_ambassador_cr_manifests
|
register: ingress_ambassador_cr_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Ambassador | Apply AmbassadorInstallation
|
- name: Ambassador | Apply AmbassadorInstallation
|
||||||
kube:
|
kube:
|
||||||
|
@ -69,4 +69,4 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
loop: "{{ ingress_ambassador_cr_manifests.results }}"
|
loop: "{{ ingress_ambassador_cr_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -5,7 +5,7 @@
|
||||||
path: "{{ kube_config_dir }}/addons/cert_manager"
|
path: "{{ kube_config_dir }}/addons/cert_manager"
|
||||||
state: absent
|
state: absent
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -14,7 +14,7 @@
|
||||||
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
|
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -26,7 +26,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Cert Manager | Templates list
|
- name: Cert Manager | Templates list
|
||||||
set_fact:
|
set_fact:
|
||||||
|
@ -54,7 +54,7 @@
|
||||||
with_items: "{{ cert_manager_templates }}"
|
with_items: "{{ cert_manager_templates }}"
|
||||||
register: cert_manager_manifests
|
register: cert_manager_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Cert Manager | Apply manifests
|
- name: Cert Manager | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -65,12 +65,12 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ cert_manager_manifests.results }}"
|
with_items: "{{ cert_manager_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Cert Manager | Wait for Webhook pods become ready
|
- name: Cert Manager | Wait for Webhook pods become ready
|
||||||
command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s"
|
command: "{{ bin_dir }}/kubectl wait po --namespace={{ cert_manager_namespace }} --selector app=webhook --for=condition=Ready --timeout=600s"
|
||||||
register: cert_manager_webhook_pods_ready
|
register: cert_manager_webhook_pods_ready
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Cert Manager | Create ClusterIssuer manifest
|
- name: Cert Manager | Create ClusterIssuer manifest
|
||||||
template:
|
template:
|
||||||
|
@ -78,7 +78,7 @@
|
||||||
dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
|
dest: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
|
||||||
register: cert_manager_clusterissuer_manifest
|
register: cert_manager_clusterissuer_manifest
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0] and cert_manager_webhook_pods_ready is succeeded
|
- inventory_hostname == groups['kube_control_plane'][0] and cert_manager_webhook_pods_ready is succeeded
|
||||||
|
|
||||||
- name: Cert Manager | Apply ClusterIssuer manifest
|
- name: Cert Manager | Apply ClusterIssuer manifest
|
||||||
kube:
|
kube:
|
||||||
|
@ -86,4 +86,4 @@
|
||||||
kubectl: "{{ bin_dir }}/kubectl"
|
kubectl: "{{ bin_dir }}/kubectl"
|
||||||
filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
|
filename: "{{ kube_config_dir }}/addons/cert_manager/clusterissuer-cert-manager.yml"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and cert_manager_clusterissuer_manifest is succeeded
|
when: inventory_hostname == groups['kube_control_plane'][0] and cert_manager_clusterissuer_manifest is succeeded
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Templates list
|
- name: NGINX Ingress Controller | Templates list
|
||||||
set_fact:
|
set_fact:
|
||||||
|
@ -38,7 +38,7 @@
|
||||||
with_items: "{{ ingress_nginx_templates }}"
|
with_items: "{{ ingress_nginx_templates }}"
|
||||||
register: ingress_nginx_manifests
|
register: ingress_nginx_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: NGINX Ingress Controller | Apply manifests
|
- name: NGINX Ingress Controller | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -50,4 +50,4 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ ingress_nginx_manifests.results }}"
|
with_items: "{{ ingress_nginx_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
dependencies:
|
dependencies:
|
||||||
- role: kubernetes-apps/ansible
|
- role: kubernetes-apps/ansible
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- role: kubernetes-apps/helm
|
- role: kubernetes-apps/helm
|
||||||
when:
|
when:
|
||||||
|
@ -13,21 +13,21 @@ dependencies:
|
||||||
- role: kubernetes-apps/registry
|
- role: kubernetes-apps/registry
|
||||||
when:
|
when:
|
||||||
- registry_enabled
|
- registry_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- registry
|
- registry
|
||||||
|
|
||||||
- role: kubernetes-apps/metrics_server
|
- role: kubernetes-apps/metrics_server
|
||||||
when:
|
when:
|
||||||
- metrics_server_enabled
|
- metrics_server_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- metrics_server
|
- metrics_server
|
||||||
|
|
||||||
- role: kubernetes-apps/csi_driver/csi_crd
|
- role: kubernetes-apps/csi_driver/csi_crd
|
||||||
when:
|
when:
|
||||||
- cinder_csi_enabled
|
- cinder_csi_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- csi-driver
|
- csi-driver
|
||||||
|
|
||||||
|
@ -69,19 +69,19 @@ dependencies:
|
||||||
- role: kubernetes-apps/persistent_volumes
|
- role: kubernetes-apps/persistent_volumes
|
||||||
when:
|
when:
|
||||||
- persistent_volumes_enabled
|
- persistent_volumes_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- persistent_volumes
|
- persistent_volumes
|
||||||
|
|
||||||
- role: kubernetes-apps/snapshots
|
- role: kubernetes-apps/snapshots
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- snapshots
|
- snapshots
|
||||||
- csi-driver
|
- csi-driver
|
||||||
|
|
||||||
- role: kubernetes-apps/container_runtimes
|
- role: kubernetes-apps/container_runtimes
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- container-runtimes
|
- container-runtimes
|
||||||
|
|
||||||
|
@ -94,13 +94,13 @@ dependencies:
|
||||||
when:
|
when:
|
||||||
- cloud_provider is defined
|
- cloud_provider is defined
|
||||||
- cloud_provider == "oci"
|
- cloud_provider == "oci"
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- oci
|
- oci
|
||||||
|
|
||||||
- role: kubernetes-apps/metallb
|
- role: kubernetes-apps/metallb
|
||||||
when:
|
when:
|
||||||
- metallb_enabled
|
- metallb_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- metallb
|
- metallb
|
||||||
|
|
|
@ -22,7 +22,7 @@
|
||||||
register: apparmor_status
|
register: apparmor_status
|
||||||
when:
|
when:
|
||||||
- podsecuritypolicy_enabled
|
- podsecuritypolicy_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
failed_when: false
|
failed_when: false
|
||||||
|
|
||||||
- name: Kubernetes Apps | Set apparmor_enabled
|
- name: Kubernetes Apps | Set apparmor_enabled
|
||||||
|
@ -30,7 +30,7 @@
|
||||||
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
|
apparmor_enabled: "{{ apparmor_status.rc == 0 }}"
|
||||||
when:
|
when:
|
||||||
- podsecuritypolicy_enabled
|
- podsecuritypolicy_enabled
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: "Kubernetes Apps | Lay Down MetalLB"
|
- name: "Kubernetes Apps | Lay Down MetalLB"
|
||||||
become: true
|
become: true
|
||||||
|
@ -38,7 +38,7 @@
|
||||||
with_items: ["metallb.yml", "metallb-config.yml"]
|
with_items: ["metallb.yml", "metallb-config.yml"]
|
||||||
register: "rendering"
|
register: "rendering"
|
||||||
when:
|
when:
|
||||||
- "inventory_hostname == groups['kube-master'][0]"
|
- "inventory_hostname == groups['kube_control_plane'][0]"
|
||||||
|
|
||||||
- name: "Kubernetes Apps | Install and configure MetalLB"
|
- name: "Kubernetes Apps | Install and configure MetalLB"
|
||||||
kube:
|
kube:
|
||||||
|
@ -49,7 +49,7 @@
|
||||||
become: true
|
become: true
|
||||||
with_items: "{{ rendering.results }}"
|
with_items: "{{ rendering.results }}"
|
||||||
when:
|
when:
|
||||||
- "inventory_hostname == groups['kube-master'][0]"
|
- "inventory_hostname == groups['kube_control_plane'][0]"
|
||||||
|
|
||||||
- name: Kubernetes Apps | Check existing secret of MetalLB
|
- name: Kubernetes Apps | Check existing secret of MetalLB
|
||||||
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
|
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system get secret memberlist"
|
||||||
|
@ -57,18 +57,18 @@
|
||||||
become: true
|
become: true
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Kubernetes Apps | Create random bytes for MetalLB
|
- name: Kubernetes Apps | Create random bytes for MetalLB
|
||||||
command: "openssl rand -base64 32"
|
command: "openssl rand -base64 32"
|
||||||
register: metallb_rand
|
register: metallb_rand
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
- metallb_secret.rc != 0
|
- metallb_secret.rc != 0
|
||||||
|
|
||||||
- name: Kubernetes Apps | Install secret of MetalLB if not existing
|
- name: Kubernetes Apps | Install secret of MetalLB if not existing
|
||||||
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
|
command: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin.conf -n metallb-system create secret generic memberlist --from-literal=secretkey={{ metallb_rand.stdout }}"
|
||||||
become: true
|
become: true
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
- metallb_secret.rc != 0
|
- metallb_secret.rc != 0
|
||||||
|
|
|
@ -2,14 +2,14 @@
|
||||||
# If all masters have node role, there are no tainted master and toleration should not be specified.
|
# If all masters have node role, there are no tainted master and toleration should not be specified.
|
||||||
- name: Check all masters are node or not
|
- name: Check all masters are node or not
|
||||||
set_fact:
|
set_fact:
|
||||||
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube-master']) == groups['kube-master'] }}"
|
masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}"
|
||||||
|
|
||||||
- name: Metrics Server | Delete addon dir
|
- name: Metrics Server | Delete addon dir
|
||||||
file:
|
file:
|
||||||
path: "{{ kube_config_dir }}/addons/metrics_server"
|
path: "{{ kube_config_dir }}/addons/metrics_server"
|
||||||
state: absent
|
state: absent
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
tags:
|
tags:
|
||||||
- upgrade
|
- upgrade
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@
|
||||||
group: root
|
group: root
|
||||||
mode: 0755
|
mode: 0755
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Metrics Server | Templates list
|
- name: Metrics Server | Templates list
|
||||||
set_fact:
|
set_fact:
|
||||||
|
@ -43,7 +43,7 @@
|
||||||
with_items: "{{ metrics_server_templates }}"
|
with_items: "{{ metrics_server_templates }}"
|
||||||
register: metrics_server_manifests
|
register: metrics_server_manifests
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Metrics Server | Apply manifests
|
- name: Metrics Server | Apply manifests
|
||||||
kube:
|
kube:
|
||||||
|
@ -54,4 +54,4 @@
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ metrics_server_manifests.results }}"
|
with_items: "{{ metrics_server_manifests.results }}"
|
||||||
when:
|
when:
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -8,4 +8,4 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ canal_manifests.results }}"
|
with_items: "{{ canal_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ cilium_node_manifests.results }}"
|
with_items: "{{ cilium_node_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
||||||
- name: Cilium | Wait for pods to run
|
- name: Cilium | Wait for pods to run
|
||||||
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
|
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
|
||||||
|
@ -17,4 +17,4 @@
|
||||||
retries: 30
|
retries: 30
|
||||||
delay: 10
|
delay: 10
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ flannel_node_manifests.results }}"
|
with_items: "{{ flannel_node_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
||||||
- name: Flannel | Wait for flannel subnet.env file presence
|
- name: Flannel | Wait for flannel subnet.env file presence
|
||||||
wait_for:
|
wait_for:
|
||||||
|
|
|
@ -6,4 +6,4 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ kube_ovn_node_manifests.results }}"
|
with_items: "{{ kube_ovn_node_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
resource: "ds"
|
resource: "ds"
|
||||||
namespace: "kube-system"
|
namespace: "kube-system"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
delegate_to: "{{ groups['kube-master'] | first }}"
|
delegate_to: "{{ groups['kube_control_plane'] | first }}"
|
||||||
run_once: true
|
run_once: true
|
||||||
|
|
||||||
- name: kube-router | Wait for kube-router pods to be ready
|
- name: kube-router | Wait for kube-router pods to be ready
|
||||||
|
@ -18,6 +18,6 @@
|
||||||
retries: 30
|
retries: 30
|
||||||
delay: 10
|
delay: 10
|
||||||
ignore_errors: yes
|
ignore_errors: yes
|
||||||
delegate_to: "{{ groups['kube-master'] | first }}"
|
delegate_to: "{{ groups['kube_control_plane'] | first }}"
|
||||||
run_once: true
|
run_once: true
|
||||||
changed_when: false
|
changed_when: false
|
||||||
|
|
|
@ -8,4 +8,4 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
|
with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
|
@ -6,4 +6,4 @@
|
||||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ ovn4nfv_node_manifests.results }}"
|
with_items: "{{ ovn4nfv_node_manifests.results }}"
|
||||||
when: inventory_hostname == groups['kube-master'][0] and not item is skipped
|
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped
|
||||||
|
|
|
@ -8,7 +8,7 @@
|
||||||
resource: "ds"
|
resource: "ds"
|
||||||
namespace: "kube-system"
|
namespace: "kube-system"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
||||||
- name: Weave | Wait for Weave to become available
|
- name: Weave | Wait for Weave to become available
|
||||||
uri:
|
uri:
|
||||||
|
@ -18,4 +18,4 @@
|
||||||
retries: 180
|
retries: 180
|
||||||
delay: 5
|
delay: 5
|
||||||
until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
|
until: "weave_status.status == 200 and 'Status: ready' in weave_status.content"
|
||||||
when: inventory_hostname == groups['kube-master'][0]
|
when: inventory_hostname == groups['kube_control_plane'][0]
|
||||||
|
|
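Every hunk above applies the same rename inside task gating expressions: steps that must run exactly once now key off the first host of the kube_control_plane inventory group rather than kube-master, either through a `when` condition or through `delegate_to` with `run_once`. A minimal sketch of those two patterns, using hypothetical example tasks that are not part of this change set:

# Hypothetical illustration only; the task names and debug/command bodies are
# placeholders, but the gating follows the pattern used throughout this diff.
- name: Example | Run once on the first control plane node
  debug:
    msg: "Rendering manifests on {{ inventory_hostname }}"
  when: inventory_hostname == groups['kube_control_plane'][0]

- name: Example | Delegate a one-off command to the first control plane node
  command: "{{ bin_dir }}/kubectl get nodes"
  delegate_to: "{{ groups['kube_control_plane'] | first }}"
  run_once: true

Both forms assume the inventory exposes a kube_control_plane group; the underscore spelling is what keeps the group name valid for Ansible.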