From 360aff4a571f4a8ac7603a4a68223e744fad1209 Mon Sep 17 00:00:00 2001 From: Cristian Calin <6627509+cristicalin@users.noreply.github.com> Date: Thu, 29 Apr 2021 15:20:50 +0300 Subject: [PATCH] Rename ansible groups to use _ instead of - (#7552) * rename ansible groups to use _ instead of - k8s-cluster -> k8s_cluster k8s-node -> k8s_node calico-rr -> calico_rr no-floating -> no_floating Note: kube-node,k8s-cluster groups in upgrade CI need clean-up after v2.16 is tagged * ensure old groups are mapped to the new ones --- README.md | 2 +- Vagrantfile | 4 +- cluster.yml | 24 ++--- .../aws_inventory/kubespray-aws-inventory.py | 4 +- .../generate-inventory/templates/inventory.j2 | 8 +- .../templates/inventory.j2 | 8 +- .../generate-templates/templates/minions.json | 4 +- contrib/dind/run-test-distros.sh | 2 +- contrib/inventory_builder/inventory.py | 28 +++--- .../inventory_builder/tests/test_inventory.py | 10 +- .../network-storage/glusterfs/glusterfs.yml | 2 +- .../glusterfs/inventory.example | 6 +- .../heketi/inventory.yml.sample | 4 +- contrib/packaging/rpm/kubespray.spec | 2 +- contrib/terraform/aws/templates/inventory.tpl | 8 +- .../exoscale/templates/inventory.tpl | 6 +- contrib/terraform/gcp/generate-inventory.sh | 6 +- contrib/terraform/openstack/README.md | 8 +- .../openstack/modules/compute/main.tf | 28 +++--- contrib/terraform/openstack/variables.tf | 4 +- contrib/terraform/packet/README.md | 2 +- contrib/terraform/packet/kubespray.tf | 6 +- .../terraform/upcloud/templates/inventory.tpl | 6 +- .../terraform/vsphere/templates/inventory.tpl | 6 +- docs/ansible.md | 26 ++--- docs/aws-ebs-csi.md | 2 +- docs/aws.md | 8 +- docs/azure-csi.md | 2 +- docs/calico.md | 22 ++--- docs/cinder-csi.md | 2 +- docs/containerd.md | 2 +- docs/cri-o.md | 4 +- docs/gcp-pd-csi.md | 2 +- docs/getting-started.md | 2 +- docs/ha-mode.md | 2 +- docs/integration.md | 4 +- docs/kata-containers.md | 2 +- docs/kube-ovn.md | 2 +- docs/large-deployments.md | 6 +- docs/macvlan.md | 6 +- docs/nodes.md | 4 +- docs/ovn4nfv.md | 2 +- docs/setting-up-your-first-cluster.md | 4 +- docs/test_cases.md | 4 +- docs/upgrades.md | 6 +- docs/vars.md | 6 +- docs/weave.md | 2 +- .../migrate_openstack_provider.yml | 2 +- extra_playbooks/upgrade-only-k8s.yml | 6 +- facts.yml | 2 +- inventory/local/hosts.ini | 6 +- .../{k8s-cluster => k8s_cluster}/addons.yml | 0 .../k8s-cluster.yml | 0 .../k8s-net-calico.yml | 0 .../k8s-net-canal.yml | 0 .../k8s-net-cilium.yml | 0 .../k8s-net-flannel.yml | 0 .../k8s-net-kube-router.yml | 0 .../k8s-net-macvlan.yml | 0 .../k8s-net-weave.yml | 0 inventory/sample/inventory.ini | 10 +- legacy_groups.yml | 42 ++++++++ recover-control-plane.yml | 10 +- remove-node.yml | 14 +-- reset.yml | 12 +-- roles/download/defaults/main.yml | 98 +++++++++---------- roles/download/tasks/prep_kubeadm_images.yml | 2 +- roles/etcd/defaults/main.yml | 2 +- roles/etcd/tasks/check_certs.yml | 12 +-- roles/etcd/tasks/gen_certs_script.yml | 16 +-- roles/etcd/tasks/main.yml | 4 +- .../csi_driver/cinder/tasks/main.yml | 4 +- .../openstack/tasks/main.yml | 4 +- .../local_volume_provisioner/tasks/main.yml | 2 +- .../ingress_controller/cert_manager/README.md | 4 +- roles/kubernetes-apps/metallb/README.md | 2 +- .../metrics_server/tasks/main.yml | 2 +- .../control-plane/tasks/kubeadm-setup.yml | 2 +- .../templates/kubeadm-config.v1beta2.yaml.j2 | 2 +- .../kubeadm/tasks/kubeadm_etcd_node.yml | 2 +- .../templates/kubeadm-client.conf.v1beta2.j2 | 2 +- .../templates/kubelet-config.v1beta1.yaml.j2 | 2 +- 
.../node/templates/kubelet.env.v1beta1.j2 | 2 +- .../preinstall/tasks/0020-verify-settings.yml | 16 +-- .../tasks/0050-create_directories.yml | 12 +-- .../preinstall/tasks/0090-etchosts.yml | 2 +- roles/kubernetes/tokens/tasks/gen_tokens.yml | 2 +- roles/kubespray-defaults/defaults/main.yaml | 4 +- .../kubespray-defaults/tasks/fallback_ips.yml | 4 +- roles/kubespray-defaults/tasks/no_proxy.yml | 4 +- roles/network_plugin/calico/tasks/install.yml | 12 +-- .../calico/templates/calico-config.yml.j2 | 2 +- .../kube-router/tasks/annotate.yml | 6 +- .../templates/centos-routes-macvlan.cfg.j2 | 2 +- .../templates/coreos-network-macvlan.cfg.j2 | 2 +- .../templates/debian-network-macvlan.cfg.j2 | 4 +- roles/remove-node/pre-remove/tasks/main.yml | 2 +- roles/reset/tasks/main.yml | 2 +- scale.yml | 18 ++-- tests/cloud_playbooks/delete-aws.yml | 2 +- .../roles/packet-ci/templates/inventory.j2 | 26 ++++- tests/templates/inventory-aws.j2 | 10 +- tests/templates/inventory-do.j2 | 14 +-- tests/templates/inventory-gce.j2 | 18 ++-- tests/testcases/040_check-network-adv.yml | 8 +- upgrade-cluster.yml | 26 ++--- 106 files changed, 403 insertions(+), 373 deletions(-) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/addons.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-cluster.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-calico.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-canal.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-cilium.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-flannel.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-kube-router.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-macvlan.yml (100%) rename inventory/sample/group_vars/{k8s-cluster => k8s_cluster}/k8s-net-weave.yml (100%) create mode 100644 legacy_groups.yml diff --git a/README.md b/README.md index 33f76b8c7..27d13aa80 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inv # Review and change parameters under ``inventory/mycluster/group_vars`` cat inventory/mycluster/group_vars/all/all.yml -cat inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml +cat inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml # Deploy Kubespray with Ansible Playbook - run the playbook as root # The option `--become` is required, as for example writing SSL keys in /etc/, diff --git a/Vagrantfile b/Vagrantfile index 5ee9e4637..0a90b5170 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -254,8 +254,8 @@ Vagrant.configure("2") do |config| ansible.groups = { "etcd" => ["#{$instance_name_prefix}-[1:#{$etcd_instances}]"], "kube_control_plane" => ["#{$instance_name_prefix}-[1:#{$kube_master_instances}]"], - "kube-node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"], - "k8s-cluster:children" => ["kube_control_plane", "kube-node"], + "kube_node" => ["#{$instance_name_prefix}-[1:#{$kube_node_instances}]"], + "k8s_cluster:children" => ["kube_control_plane", "kube_node"], } end end diff --git a/cluster.yml b/cluster.yml index 6a169e9b0..c2ba9a7bd 100644 --- a/cluster.yml +++ b/cluster.yml @@ -2,14 +2,8 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - 
hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml - hosts: bastion[0] gather_facts: False @@ -18,7 +12,7 @@ - { role: kubespray-defaults } - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } -- hosts: k8s-cluster:etcd +- hosts: k8s_cluster:etcd strategy: linear any_errors_fatal: "{{ any_errors_fatal | default(true) }}" gather_facts: false @@ -31,7 +25,7 @@ tags: always import_playbook: facts.yml -- hosts: k8s-cluster:etcd +- hosts: k8s_cluster:etcd gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -54,7 +48,7 @@ etcd_events_cluster_setup: "{{ etcd_events_cluster_enabled }}" when: not etcd_kubeadm_enabled| default(false) -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -67,7 +61,7 @@ etcd_events_cluster_setup: false when: not etcd_kubeadm_enabled| default(false) -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -85,7 +79,7 @@ - { role: kubernetes/client, tags: client } - { role: kubernetes-apps/cluster_roles, tags: cluster-roles } -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -95,7 +89,7 @@ - { role: network_plugin, tags: network } - { role: kubernetes/node-label, tags: node-label } -- hosts: calico-rr +- hosts: calico_rr gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -131,7 +125,7 @@ - { role: kubespray-defaults } - { role: kubernetes-apps, tags: apps } -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" diff --git a/contrib/aws_inventory/kubespray-aws-inventory.py b/contrib/aws_inventory/kubespray-aws-inventory.py index 46ad6a063..3ad241c7e 100755 --- a/contrib/aws_inventory/kubespray-aws-inventory.py +++ b/contrib/aws_inventory/kubespray-aws-inventory.py @@ -35,7 +35,7 @@ class SearchEC2Tags(object): hosts['_meta'] = { 'hostvars': {} } ##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value. 
- for group in ["kube_control_plane", "kube-node", "etcd"]: + for group in ["kube_control_plane", "kube_node", "etcd"]: hosts[group] = [] tag_key = "kubespray-role" tag_value = ["*"+group+"*"] @@ -70,7 +70,7 @@ class SearchEC2Tags(object): hosts[group].append(dns_name) hosts['_meta']['hostvars'][dns_name] = ansible_host - hosts['k8s-cluster'] = {'children':['kube_control_plane', 'kube-node']} + hosts['k8s_cluster'] = {'children':['kube_control_plane', 'kube_node']} print(json.dumps(hosts, sort_keys=True, indent=2)) SearchEC2Tags() diff --git a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 index 8a13cc635..6c5feb2cd 100644 --- a/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 +++ b/contrib/azurerm/roles/generate-inventory/templates/inventory.j2 @@ -21,13 +21,13 @@ {% endif %} {% endfor %} -[kube-node] +[kube_node] {% for vm in vm_list %} -{% if 'kube-node' in vm.tags.roles %} +{% if 'kube_node' in vm.tags.roles %} {{ vm.name }} {% endif %} {% endfor %} -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane diff --git a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 index 61183cd1d..6ab59df1b 100644 --- a/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 +++ b/contrib/azurerm/roles/generate-inventory_2/templates/inventory.j2 @@ -21,14 +21,14 @@ {% endif %} {% endfor %} -[kube-node] +[kube_node] {% for vm in vm_roles_list %} -{% if 'kube-node' in vm.tags.roles %} +{% if 'kube_node' in vm.tags.roles %} {{ vm.name }} {% endif %} {% endfor %} -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane diff --git a/contrib/azurerm/roles/generate-templates/templates/minions.json b/contrib/azurerm/roles/generate-templates/templates/minions.json index 3c122f34a..bd0d059cb 100644 --- a/contrib/azurerm/roles/generate-templates/templates/minions.json +++ b/contrib/azurerm/roles/generate-templates/templates/minions.json @@ -61,7 +61,7 @@ "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]" ], "tags": { - "roles": "kube-node" + "roles": "kube_node" }, "apiVersion": "{{apiVersion}}", "properties": { @@ -112,4 +112,4 @@ } {% if not loop.last %},{% endif %} {% endfor %} ] -} \ No newline at end of file +} diff --git a/contrib/dind/run-test-distros.sh b/contrib/dind/run-test-distros.sh index 0e3510fd0..bd7e12223 100755 --- a/contrib/dind/run-test-distros.sh +++ b/contrib/dind/run-test-distros.sh @@ -46,7 +46,7 @@ test_distro() { pass_or_fail "$prefix: netcheck" || return 1 } -NODES=($(egrep ^kube-node hosts)) +NODES=($(egrep ^kube_node hosts)) NETCHECKER_HOST=localhost : ${OUTPUT_DIR:=./out} diff --git a/contrib/inventory_builder/inventory.py b/contrib/inventory_builder/inventory.py index 814085a73..184989fc3 100644 --- a/contrib/inventory_builder/inventory.py +++ b/contrib/inventory_builder/inventory.py @@ -44,8 +44,8 @@ import re import subprocess import sys -ROLES = ['all', 'kube_control_plane', 'kube-node', 'etcd', 'k8s-cluster', - 'calico-rr'] +ROLES = ['all', 'kube_control_plane', 'kube_node', 'etcd', 'k8s_cluster', + 'calico_rr'] PROTECTED_NAMES = ROLES AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'print_hostnames', 'load'] @@ -269,7 +269,7 @@ class KubesprayInventory(object): def purge_invalid_hosts(self, hostnames, protected_names=[]): for role in self.yaml_config['all']['children']: - if role != 'k8s-cluster' 
and self.yaml_config['all']['children'][role]['hosts']: # noqa + if role != 'k8s_cluster' and self.yaml_config['all']['children'][role]['hosts']: # noqa all_hosts = self.yaml_config['all']['children'][role]['hosts'].copy() # noqa for host in all_hosts.keys(): if host not in hostnames and host not in protected_names: @@ -290,7 +290,7 @@ class KubesprayInventory(object): if self.yaml_config['all']['hosts'] is None: self.yaml_config['all']['hosts'] = {host: None} self.yaml_config['all']['hosts'][host] = opts - elif group != 'k8s-cluster:children': + elif group != 'k8s_cluster:children': if self.yaml_config['all']['children'][group]['hosts'] is None: self.yaml_config['all']['children'][group]['hosts'] = { host: None} @@ -307,37 +307,37 @@ class KubesprayInventory(object): def set_k8s_cluster(self): k8s_cluster = {'children': {'kube_control_plane': None, - 'kube-node': None}} - self.yaml_config['all']['children']['k8s-cluster'] = k8s_cluster + 'kube_node': None}} + self.yaml_config['all']['children']['k8s_cluster'] = k8s_cluster def set_calico_rr(self, hosts): for host in hosts: if host in self.yaml_config['all']['children']['kube_control_plane']: # noqa - self.debug("Not adding {0} to calico-rr group because it " + self.debug("Not adding {0} to calico_rr group because it " "conflicts with kube_control_plane " "group".format(host)) continue - if host in self.yaml_config['all']['children']['kube-node']: - self.debug("Not adding {0} to calico-rr group because it " - "conflicts with kube-node group".format(host)) + if host in self.yaml_config['all']['children']['kube_node']: + self.debug("Not adding {0} to calico_rr group because it " + "conflicts with kube_node group".format(host)) continue - self.add_host_to_group('calico-rr', host) + self.add_host_to_group('calico_rr', host) def set_kube_node(self, hosts): for host in hosts: if len(self.yaml_config['all']['hosts']) >= SCALE_THRESHOLD: if host in self.yaml_config['all']['children']['etcd']['hosts']: # noqa - self.debug("Not adding {0} to kube-node group because of " + self.debug("Not adding {0} to kube_node group because of " "scale deployment and host is in etcd " "group.".format(host)) continue if len(self.yaml_config['all']['hosts']) >= MASSIVE_SCALE_THRESHOLD: # noqa if host in self.yaml_config['all']['children']['kube_control_plane']['hosts']: # noqa - self.debug("Not adding {0} to kube-node group because of " + self.debug("Not adding {0} to kube_node group because of " "scale deployment and host is in " "kube_control_plane group.".format(host)) continue - self.add_host_to_group('kube-node', host) + self.add_host_to_group('kube_node', host) def set_etcd(self, hosts): for host in hosts: diff --git a/contrib/inventory_builder/tests/test_inventory.py b/contrib/inventory_builder/tests/test_inventory.py index 4d04603a7..f9aa40bc1 100644 --- a/contrib/inventory_builder/tests/test_inventory.py +++ b/contrib/inventory_builder/tests/test_inventory.py @@ -241,8 +241,8 @@ class TestInventory(unittest.TestCase): self.inv.yaml_config['all']['hosts'].get(host), opt) def test_set_k8s_cluster(self): - group = 'k8s-cluster' - expected_hosts = ['kube-node', 'kube_control_plane'] + group = 'k8s_cluster' + expected_hosts = ['kube_node', 'kube_control_plane'] self.inv.set_k8s_cluster() for host in expected_hosts: @@ -251,7 +251,7 @@ class TestInventory(unittest.TestCase): self.inv.yaml_config['all']['children'][group]['children']) def test_set_kube_node(self): - group = 'kube-node' + group = 'kube_node' host = 'node1' self.inv.set_kube_node([host]) @@ -280,7 
+280,7 @@ class TestInventory(unittest.TestCase): for h in range(3): self.assertFalse( list(hosts.keys())[h] in - self.inv.yaml_config['all']['children']['kube-node']['hosts']) + self.inv.yaml_config['all']['children']['kube_node']['hosts']) def test_scale_scenario_two(self): num_nodes = 500 @@ -296,7 +296,7 @@ class TestInventory(unittest.TestCase): for h in range(5): self.assertFalse( list(hosts.keys())[h] in - self.inv.yaml_config['all']['children']['kube-node']['hosts']) + self.inv.yaml_config['all']['children']['kube_node']['hosts']) def test_range2ips_range(self): changed_hosts = ['10.90.0.2', '10.90.0.4-10.90.0.6', '10.90.0.8'] diff --git a/contrib/network-storage/glusterfs/glusterfs.yml b/contrib/network-storage/glusterfs/glusterfs.yml index 8146dfc06..79fc3aeb9 100644 --- a/contrib/network-storage/glusterfs/glusterfs.yml +++ b/contrib/network-storage/glusterfs/glusterfs.yml @@ -15,7 +15,7 @@ roles: - { role: glusterfs/server } -- hosts: k8s-cluster +- hosts: k8s_cluster roles: - { role: glusterfs/client } diff --git a/contrib/network-storage/glusterfs/inventory.example b/contrib/network-storage/glusterfs/inventory.example index dc77b4b0a..f6c107070 100644 --- a/contrib/network-storage/glusterfs/inventory.example +++ b/contrib/network-storage/glusterfs/inventory.example @@ -23,15 +23,15 @@ # node2 # node3 -# [kube-node] +# [kube_node] # node2 # node3 # node4 # node5 # node6 -# [k8s-cluster:children] -# kube-node +# [k8s_cluster:children] +# kube_node # kube_control_plane # [gfs-cluster] diff --git a/contrib/network-storage/heketi/inventory.yml.sample b/contrib/network-storage/heketi/inventory.yml.sample index 46adbed44..e68ec9637 100644 --- a/contrib/network-storage/heketi/inventory.yml.sample +++ b/contrib/network-storage/heketi/inventory.yml.sample @@ -3,7 +3,7 @@ all: heketi_admin_key: "11elfeinhundertundelf" heketi_user_key: "!!einseinseins" children: - k8s-cluster: + k8s_cluster: vars: kubelet_fail_swap_on: false children: @@ -13,7 +13,7 @@ all: etcd: hosts: node2: - kube-node: + kube_node: hosts: &kube_nodes node1: node2: diff --git a/contrib/packaging/rpm/kubespray.spec b/contrib/packaging/rpm/kubespray.spec index e4c1808be..656f624be 100644 --- a/contrib/packaging/rpm/kubespray.spec +++ b/contrib/packaging/rpm/kubespray.spec @@ -51,7 +51,7 @@ export SKIP_PIP_INSTALL=1 %doc %{_docdir}/%{name}/inventory/sample/hosts.ini %config %{_sysconfdir}/%{name}/ansible.cfg %config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml -%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml +%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s_cluster.yml %license %{_docdir}/%{name}/LICENSE %{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info %{_datarootdir}/%{name}/roles/ diff --git a/contrib/terraform/aws/templates/inventory.tpl b/contrib/terraform/aws/templates/inventory.tpl index d8fe2f995..baa9ea854 100644 --- a/contrib/terraform/aws/templates/inventory.tpl +++ b/contrib/terraform/aws/templates/inventory.tpl @@ -11,7 +11,7 @@ ${public_ip_address_bastion} ${list_master} -[kube-node] +[kube_node] ${list_node} @@ -19,10 +19,10 @@ ${list_node} ${list_etcd} -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane -[k8s-cluster:vars] +[k8s_cluster:vars] ${elb_api_fqdn} diff --git a/contrib/terraform/exoscale/templates/inventory.tpl b/contrib/terraform/exoscale/templates/inventory.tpl index 27b9e60f3..85ed1924b 100644 --- a/contrib/terraform/exoscale/templates/inventory.tpl +++ 
b/contrib/terraform/exoscale/templates/inventory.tpl @@ -11,9 +11,9 @@ supplementary_addresses_in_ssl_keys = [ "${api_lb_ip_address}" ] [etcd] ${list_master} -[kube-node] +[kube_node] ${list_worker} -[k8s-cluster:children] +[k8s_cluster:children] kube_control_plane -kube-node +kube_node diff --git a/contrib/terraform/gcp/generate-inventory.sh b/contrib/terraform/gcp/generate-inventory.sh index d266b1899..585a4f415 100755 --- a/contrib/terraform/gcp/generate-inventory.sh +++ b/contrib/terraform/gcp/generate-inventory.sh @@ -65,12 +65,12 @@ for name in "${MASTER_NAMES[@]}"; do done echo "" -echo "[kube-node]" +echo "[kube_node]" for name in "${WORKER_NAMES[@]}"; do echo "${name}" done echo "" -echo "[k8s-cluster:children]" +echo "[k8s_cluster:children]" echo "kube_control_plane" -echo "kube-node" +echo "kube_node" diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md index 67bc0f066..7b7e9e1ce 100644 --- a/contrib/terraform/openstack/README.md +++ b/contrib/terraform/openstack/README.md @@ -263,8 +263,8 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`. |`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one | |`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. | | `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks | -|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube-node` for tainting them as nodes, empty by default. | -|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube-ingress` for running ingress controller pods, empty by default. | +|`supplementary_master_groups` | To add ansible groups to the masters, such as `kube_node` for tainting them as nodes, empty by default. | +|`supplementary_node_groups` | To add ansible groups to the nodes, such as `kube_ingress` for running ingress controller pods, empty by default. | |`bastion_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, `["0.0.0.0/0"]` by default | |`master_allowed_remote_ips` | List of CIDR blocks allowed to initiate an API connection, `["0.0.0.0/0"]` by default | |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default | @@ -421,7 +421,7 @@ terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack ``` if you chose to create a bastion host, this script will create -`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to +`contrib/terraform/openstack/k8s_cluster.yml` with an ssh command for Ansible to be able to access your machines tunneling through the bastion's IP address. If you want to manually handle the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as ansible will @@ -546,7 +546,7 @@ bin_dir: /opt/bin cloud_provider: openstack ``` -Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml`: +Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml`: - Set variable **kube_network_plugin** to your desired networking plugin. 
- **flannel** works out-of-the-box diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf index c9e5609f3..9409be3c6 100644 --- a/contrib/terraform/openstack/modules/compute/main.tf +++ b/contrib/terraform/openstack/modules/compute/main.tf @@ -204,7 +204,7 @@ resource "openstack_compute_instance_v2" "bastion" { } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no-floating.yml" + command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > group_vars/no_floating.yml" } } @@ -245,13 +245,13 @@ resource "openstack_compute_instance_v2" "k8s_master" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster" + kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster" depends_on = var.network_id use_access_ip = var.use_access_ip } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml" + command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml" } } @@ -292,13 +292,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster" + kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster" depends_on = var.network_id use_access_ip = var.use_access_ip } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml" + command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no_floating.yml" } } @@ -337,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "etcd,no-floating" + kubespray_groups = "etcd,no_floating" depends_on = var.network_id use_access_ip = var.use_access_ip } @@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating" + kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" depends_on = var.network_id use_access_ip = var.use_access_ip } @@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating" + kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s_cluster,no_floating" depends_on = var.network_id use_access_ip = var.use_access_ip } @@ -462,13 +462,13 @@ resource 
"openstack_compute_instance_v2" "k8s_node" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}" + kubespray_groups = "kube_node,k8s_cluster,${var.supplementary_node_groups}" depends_on = var.network_id use_access_ip = var.use_access_ip } provisioner "local-exec" { - command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no-floating.yml" + command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, var.k8s_node_fips), 0)}/ > group_vars/no_floating.yml" } } @@ -507,7 +507,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}" + kubespray_groups = "kube_node,k8s_cluster,no_floating,${var.supplementary_node_groups}" depends_on = var.network_id use_access_ip = var.use_access_ip } @@ -548,13 +548,13 @@ resource "openstack_compute_instance_v2" "k8s_nodes" { metadata = { ssh_user = var.ssh_user - kubespray_groups = "kube-node,k8s-cluster,%{if each.value.floating_ip == false}no-floating,%{endif}${var.supplementary_node_groups}" + kubespray_groups = "kube_node,k8s_cluster,%{if each.value.floating_ip == false}no_floating,%{endif}${var.supplementary_node_groups}" depends_on = var.network_id use_access_ip = var.use_access_ip } provisioner "local-exec" { - command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no-floating.yml%{else}true%{endif}" + command = "%{if each.value.floating_ip}sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(concat(var.bastion_fips, [for key, value in var.k8s_nodes_fips : value.address]), 0)}/ > group_vars/no_floating.yml%{else}true%{endif}" } } @@ -593,7 +593,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" { metadata = { ssh_user = var.ssh_user_gfs - kubespray_groups = "gfs-cluster,network-storage,no-floating" + kubespray_groups = "gfs-cluster,network-storage,no_floating" depends_on = var.network_id use_access_ip = var.use_access_ip } diff --git a/contrib/terraform/openstack/variables.tf b/contrib/terraform/openstack/variables.tf index 6c6e85a8a..35bd62b60 100644 --- a/contrib/terraform/openstack/variables.tf +++ b/contrib/terraform/openstack/variables.tf @@ -177,12 +177,12 @@ variable "external_net" { } variable "supplementary_master_groups" { - description = "supplementary kubespray ansible groups for masters, such kube-node" + description = "supplementary kubespray ansible groups for masters, such kube_node" default = "" } variable "supplementary_node_groups" { - description = "supplementary kubespray ansible groups for worker nodes, such as kube-ingress" + description = "supplementary kubespray ansible groups for worker nodes, such as kube_ingress" default = "" } diff --git a/contrib/terraform/packet/README.md b/contrib/terraform/packet/README.md index 496e74206..532acb809 100644 --- a/contrib/terraform/packet/README.md +++ b/contrib/terraform/packet/README.md @@ -108,7 +108,7 @@ While the defaults in variables.tf 
will successfully deploy a cluster, it is rec Kubespray will pull down a Kubernetes configuration file to access this cluster by enabling the `kubeconfig_localhost: true` in the Kubespray configuration. -Edit `inventory/$CLUSTER/group_vars/k8s-cluster/k8s-cluster.yml` and comment back in the following line and change from `false` to `true`: +Edit `inventory/$CLUSTER/group_vars/k8s_cluster/k8s_cluster.yml` and comment back in the following line and change from `false` to `true`: `\# kubeconfig_localhost: false` becomes: `kubeconfig_localhost: true` diff --git a/contrib/terraform/packet/kubespray.tf b/contrib/terraform/packet/kubespray.tf index 00cf21ff0..18f901aea 100644 --- a/contrib/terraform/packet/kubespray.tf +++ b/contrib/terraform/packet/kubespray.tf @@ -19,7 +19,7 @@ resource "packet_device" "k8s_master" { operating_system = var.operating_system billing_cycle = var.billing_cycle project_id = var.packet_project_id - tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane", "etcd", "kube-node"] + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane", "etcd", "kube_node"] } resource "packet_device" "k8s_master_no_etcd" { @@ -32,7 +32,7 @@ resource "packet_device" "k8s_master_no_etcd" { operating_system = var.operating_system billing_cycle = var.billing_cycle project_id = var.packet_project_id - tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube_control_plane"] + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_control_plane"] } resource "packet_device" "k8s_etcd" { @@ -58,6 +58,6 @@ resource "packet_device" "k8s_node" { operating_system = var.operating_system billing_cycle = var.billing_cycle project_id = var.packet_project_id - tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-node"] + tags = ["cluster-${var.cluster_name}", "k8s_cluster", "kube_node"] } diff --git a/contrib/terraform/upcloud/templates/inventory.tpl b/contrib/terraform/upcloud/templates/inventory.tpl index cb453e3ea..28ff28ac2 100644 --- a/contrib/terraform/upcloud/templates/inventory.tpl +++ b/contrib/terraform/upcloud/templates/inventory.tpl @@ -9,9 +9,9 @@ ${list_master} [etcd] ${list_master} -[kube-node] +[kube_node] ${list_worker} -[k8s-cluster:children] +[k8s_cluster:children] kube_control_plane -kube-node +kube_node diff --git a/contrib/terraform/vsphere/templates/inventory.tpl b/contrib/terraform/vsphere/templates/inventory.tpl index cb453e3ea..28ff28ac2 100644 --- a/contrib/terraform/vsphere/templates/inventory.tpl +++ b/contrib/terraform/vsphere/templates/inventory.tpl @@ -9,9 +9,9 @@ ${list_master} [etcd] ${list_master} -[kube-node] +[kube_node] ${list_worker} -[k8s-cluster:children] +[k8s_cluster:children] kube_control_plane -kube-node +kube_node diff --git a/docs/ansible.md b/docs/ansible.md index 99d72b4dc..0440eccf2 100644 --- a/docs/ansible.md +++ b/docs/ansible.md @@ -4,28 +4,28 @@ The inventory is composed of 3 groups: -* **kube-node** : list of kubernetes nodes where the pods will run. +* **kube_node** : list of kubernetes nodes where the pods will run. * **kube_control_plane** : list of servers where kubernetes control plane components (apiserver, scheduler, controller) will run. * **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose. 
-Note: do not modify the children of _k8s-cluster_, like putting -the _etcd_ group into the _k8s-cluster_, unless you are certain +Note: do not modify the children of _k8s_cluster_, like putting +the _etcd_ group into the _k8s_cluster_, unless you are certain to do that and you have it fully contained in the latter: ```ShellSession -k8s-cluster ⊂ etcd => kube-node ∩ etcd = etcd +k8s_cluster ⊂ etcd => kube_node ∩ etcd = etcd ``` -When _kube-node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads. +When _kube_node_ contains _etcd_, you define your etcd cluster to be as well schedulable for Kubernetes workloads. If you want it a standalone, make sure those groups do not intersect. If you want the server to act both as control-plane and node, the server must be defined -on both groups _kube_control_plane_ and _kube-node_. If you want a standalone and +on both groups _kube_control_plane_ and _kube_node_. If you want a standalone and unschedulable master, the server must be defined only in the _kube_control_plane_ and -not _kube-node_. +not _kube_node_. There are also two special groups: -* **calico-rr** : explained for [advanced Calico networking cases](calico.md) +* **calico_rr** : explained for [advanced Calico networking cases](calico.md) * **bastion** : configure a bastion host if your nodes are not directly reachable Below is a complete inventory example: @@ -49,15 +49,15 @@ node1 node2 node3 -[kube-node] +[kube_node] node2 node3 node4 node5 node6 -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane ``` @@ -66,7 +66,7 @@ kube_control_plane The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``. Optional variables are located in the `inventory/sample/group_vars/all.yml`. Mandatory variables that are common for at least one role (or a node group) can be found in the -`inventory/sample/group_vars/k8s-cluster.yml`. +`inventory/sample/group_vars/k8s_cluster.yml`. There are also role vars for docker, kubernetes preinstall and master roles. According to the [ansible docs](https://docs.ansible.com/ansible/latest/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable), those cannot be overridden from the group vars. In order to override, one should use @@ -79,7 +79,7 @@ Layer | Comment ------|-------- **role defaults** | provides best UX to override things for Kubespray deployments inventory vars | Unused -**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things +**inventory group_vars** | Expects users to use ``all.yml``,``k8s_cluster.yml`` etc. to override things inventory host_vars | Unused playbook group_vars | Unused playbook host_vars | Unused diff --git a/docs/aws-ebs-csi.md b/docs/aws-ebs-csi.md index 4d8c96311..3a7684ef2 100644 --- a/docs/aws-ebs-csi.md +++ b/docs/aws-ebs-csi.md @@ -8,7 +8,7 @@ To set the number of replicas for the AWS CSI controller, you can change `aws_eb Make sure to add a role, for your EC2 instances hosting Kubernetes, that allows it to do the actions necessary to request a volume and attach it: [AWS CSI Policy](https://github.com/kubernetes-sigs/aws-ebs-csi-driver/blob/master/docs/example-iam-policy.json) -If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`. 
+If you want to deploy the AWS EBS storage class used with the CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over AWS EC2 with EBS CSI Driver enabled. diff --git a/docs/aws.md b/docs/aws.md index 0e680e0d1..b45508c61 100644 --- a/docs/aws.md +++ b/docs/aws.md @@ -33,16 +33,16 @@ This will produce an inventory that is passed into Ansible that looks like the f "etcd": [ "ip-172-31-3-xxx.us-east-2.compute.internal" ], - "k8s-cluster": { + "k8s_cluster": { "children": [ "kube_control_plane", - "kube-node" + "kube_node" ] }, "kube_control_plane": [ "ip-172-31-3-xxx.us-east-2.compute.internal" ], - "kube-node": [ + "kube_node": [ "ip-172-31-8-xxx.us-east-2.compute.internal" ] } @@ -51,7 +51,7 @@ This will produce an inventory that is passed into Ansible that looks like the f Guide: - Create instances in AWS as needed. -- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube-node`. You can also share roles like `kube_control_plane, etcd` +- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube_control_plane`, `etcd`, or `kube_node`. You can also share roles like `kube_control_plane, etcd` - Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory. - Set the following AWS credentials and info as environment variables in your terminal: diff --git a/docs/azure-csi.md b/docs/azure-csi.md index 95e7a667c..d4e04d275 100644 --- a/docs/azure-csi.md +++ b/docs/azure-csi.md @@ -8,7 +8,7 @@ To deploy Azure Disk CSI driver, uncomment the `azure_csi_enabled` option in `gr ## Azure Disk CSI Storage Class -If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`. +If you want to deploy the Azure Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. ## Parameters diff --git a/docs/calico.md b/docs/calico.md index 45d1b0e90..f090ca984 100644 --- a/docs/calico.md +++ b/docs/calico.md @@ -61,7 +61,7 @@ calico_network_backend: none ### Optional : Define the default pool CIDRs By default, `kube_pods_subnet` is used as the IP range CIDR for the default IP Pool, and `kube_pods_subnet_ipv6` for IPv6. 
-In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s-cluster/k8s-net-calico.yml): +In some cases you may want to add several pools and not have them considered by Kubernetes as external (which means that they must be within or equal to the range defined in `kube_pods_subnet` and `kube_pods_subnet_ipv6` ), it starts with the default IP Pools of which IP range CIDRs can by defined in group_vars (k8s_cluster/k8s-net-calico.yml): ```ShellSession calico_pool_cidr: 10.233.64.0/20 @@ -88,14 +88,14 @@ In order to define peers on a per node basis, the `peers` variable must be defin NB: Ansible's `hash_behaviour` is by default set to "replace", thus defining both global and per node peers would end up with having only per node peers. If having both global and per node peers defined was meant to happen, global peers would have to be defined in hostvars for each host (as well as per node peers) Since calico 3.4, Calico supports advertising Kubernetes service cluster IPs over BGP, just as it advertises pod IPs. -This can be enabled by setting the following variable as follow in group_vars (k8s-cluster/k8s-net-calico.yml) +This can be enabled by setting the following variable as follow in group_vars (k8s_cluster/k8s-net-calico.yml) ```yml calico_advertise_cluster_ips: true ``` Since calico 3.10, Calico supports advertising Kubernetes service ExternalIPs over BGP in addition to cluster IPs advertising. -This can be enabled by setting the following variable in group_vars (k8s-cluster/k8s-net-calico.yml) +This can be enabled by setting the following variable in group_vars (k8s_cluster/k8s-net-calico.yml) ```yml calico_advertise_service_external_ips: @@ -121,9 +121,9 @@ recommended here: You need to edit your inventory and add: -* `calico-rr` group with nodes in it. `calico-rr` can be combined with - `kube-node` and/or `kube_control_plane`. `calico-rr` group also must be a child - group of `k8s-cluster` group. +* `calico_rr` group with nodes in it. `calico_rr` can be combined with + `kube_node` and/or `kube_control_plane`. `calico_rr` group also must be a child + group of `k8s_cluster` group. * `cluster_id` by route reflector node/group (see details [here](https://hub.docker.com/r/calico/routereflector/)) @@ -147,18 +147,18 @@ node2 node3 node4 -[kube-node] +[kube_node] node2 node3 node4 node5 -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane -calico-rr +calico_rr -[calico-rr] +[calico_rr] rr0 rr1 diff --git a/docs/cinder-csi.md b/docs/cinder-csi.md index 86379affe..b7dadf1e4 100644 --- a/docs/cinder-csi.md +++ b/docs/cinder-csi.md @@ -10,7 +10,7 @@ You need to source the OpenStack credentials you use to deploy your machines tha Make sure the hostnames in your `inventory` file are identical to your instance names in OpenStack. Otherwise [cinder](https://docs.openstack.org/cinder/latest/) won't work as expected. -If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`. +If you want to deploy the cinder provisioner used with Cinder CSI Driver, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. 
You can now run the kubespray playbook (cluster.yml) to deploy Kubernetes over OpenStack with Cinder CSI Driver enabled. diff --git a/docs/containerd.md b/docs/containerd.md index 98de3c23c..452e9990a 100644 --- a/docs/containerd.md +++ b/docs/containerd.md @@ -5,7 +5,7 @@ Kubespray supports basic functionality for using containerd as the default conta _To use the containerd container runtime set the following variables:_ -## k8s-cluster.yml +## k8s_cluster.yml ```yaml container_manager: containerd diff --git a/docs/cri-o.md b/docs/cri-o.md index a96c3f579..ab7bdc1cf 100644 --- a/docs/cri-o.md +++ b/docs/cri-o.md @@ -16,7 +16,7 @@ skip_downloads: false etcd_kubeadm_enabled: true ``` -## k8s-cluster/k8s-cluster.yml +## k8s_cluster/k8s_cluster.yml ```yaml container_manager: crio @@ -52,7 +52,7 @@ This parameter controls not just the number of processes but also the amount of (since a thread is technically a process with shared memory). See [cri-o#1921] In order to increase the default `pids_limit` for cri-o based deployments you need to set the `crio_pids_limit` -for your `k8s-cluster` ansible group or per node depending on the use case. +for your `k8s_cluster` ansible group or per node depending on the use case. ```yaml crio_pids_limit: 4096 diff --git a/docs/gcp-pd-csi.md b/docs/gcp-pd-csi.md index 448d3cadf..88fa06039 100644 --- a/docs/gcp-pd-csi.md +++ b/docs/gcp-pd-csi.md @@ -6,7 +6,7 @@ To deploy GCP Persistent Disk CSI driver, uncomment the `gcp_pd_csi_enabled` opt ## GCP Persistent Disk Storage Class -If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s-cluster/k8s-cluster.yml` to `true`. +If you want to deploy the GCP Persistent Disk storage class to provision volumes dynamically, you should set `persistent_volumes_enabled` in `group_vars/k8s_cluster/k8s_cluster.yml` to `true`. ## GCP credentials diff --git a/docs/getting-started.md b/docs/getting-started.md index 18a50e017..ed90b88fb 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -79,7 +79,7 @@ var in inventory. By default, Kubespray configures kube_control_plane hosts with insecure access to kube-apiserver via port 8080. A kubeconfig file is not necessary in this case, because kubectl will use to connect. The kubeconfig files -generated will point to localhost (on kube_control_planes) and kube-node hosts will +generated will point to localhost (on kube_control_planes) and kube_node hosts will connect either to a localhost nginx proxy or to a loadbalancer if configured. More details on this process are in the [HA guide](/docs/ha-mode.md). diff --git a/docs/ha-mode.md b/docs/ha-mode.md index 668558f17..3bc9134ab 100644 --- a/docs/ha-mode.md +++ b/docs/ha-mode.md @@ -81,7 +81,7 @@ loadbalancer_apiserver: port on the VIP address) This domain name, or default "lb-apiserver.kubernetes.local", will be inserted -into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired +into the `/etc/hosts` file of all servers in the `k8s_cluster` group and wired into the generated self-signed TLS/SSL certificates as well. Note that the HAProxy service should as well be HA and requires a VIP management, which is out of scope of this doc. diff --git a/docs/integration.md b/docs/integration.md index 09c044fa8..31d6f0bd4 100644 --- a/docs/integration.md +++ b/docs/integration.md @@ -52,10 +52,10 @@ You could rename *all.yml* config to something else, i.e. *kubespray.yml* and cr ```ini ... 
#Kargo groups: - [kube-node:children] + [kube_node:children] kubenode - [k8s-cluster:children] + [k8s_cluster:children] kubernetes [etcd:children] diff --git a/docs/kata-containers.md b/docs/kata-containers.md index c09118806..4a5a45525 100644 --- a/docs/kata-containers.md +++ b/docs/kata-containers.md @@ -10,7 +10,7 @@ _Qemu_ is the only hypervisor supported by Kubespray. To use Kata Containers, set the following variables: -**k8s-cluster.yml**: +**k8s_cluster.yml**: ```yaml container_manager: containerd diff --git a/docs/kube-ovn.md b/docs/kube-ovn.md index 375c7a4d5..3ddc270da 100644 --- a/docs/kube-ovn.md +++ b/docs/kube-ovn.md @@ -12,7 +12,7 @@ kernel version 3.10.0-862 has a nat related bug that will affect ovs function, p ## How to use it -Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml` +Enable kube-ovn in `group_vars/k8s_cluster/k8s_cluster.yml` ```yml ... diff --git a/docs/large-deployments.md b/docs/large-deployments.md index ec6618bf4..d41201029 100644 --- a/docs/large-deployments.md +++ b/docs/large-deployments.md @@ -37,9 +37,9 @@ For a large scaled deployments, consider the following configuration changes: * Tune network prefix sizes. Those are ``kube_network_node_prefix``, ``kube_service_addresses`` and ``kube_pods_subnet``. -* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover - from host/network interruption much quicker with calico-rr. Note that - calico-rr role must be on a host without kube_control_plane or kube-node role (but +* Add calico_rr nodes if you are deploying with Calico or Canal. Nodes recover + from host/network interruption much quicker with calico_rr. Note that + calico_rr role must be on a host without kube_control_plane or kube_node role (but etcd role is okay). * Check out the diff --git a/docs/macvlan.md b/docs/macvlan.md index 51a4ba957..2d0de074b 100644 --- a/docs/macvlan.md +++ b/docs/macvlan.md @@ -2,7 +2,7 @@ ## How to use it -* Enable macvlan in `group_vars/k8s-cluster/k8s-cluster.yml` +* Enable macvlan in `group_vars/k8s_cluster/k8s_cluster.yml` ```yml ... @@ -10,7 +10,7 @@ kube_network_plugin: macvlan ... ``` -* Adjust the `macvlan_interface` in `group_vars/k8s-cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file: +* Adjust the `macvlan_interface` in `group_vars/k8s_cluster/k8s-net-macvlan.yml` or by host in the `host.yml` file: ```yml all: @@ -34,7 +34,7 @@ add `kube_proxy_masquerade_all: true` in `group_vars/all/all.yml` The nodelocal dns IP is not reacheable. -Disable it in `sample/group_vars/k8s-cluster/k8s-cluster.yml` +Disable it in `sample/group_vars/k8s_cluster/k8s_cluster.yml` ```yml enable_nodelocaldns: false diff --git a/docs/nodes.md b/docs/nodes.md index f369a5f3d..00fe2481b 100644 --- a/docs/nodes.md +++ b/docs/nodes.md @@ -17,7 +17,7 @@ Modify the order of your master list by pushing your first entry to any other po node-1: node-2: node-3: - kube-node: + kube_node: hosts: node-1: node-2: @@ -38,7 +38,7 @@ change your inventory to: node-2: node-3: node-1: - kube-node: + kube_node: hosts: node-2: node-3: diff --git a/docs/ovn4nfv.md b/docs/ovn4nfv.md index 9d120a72c..9106f6032 100644 --- a/docs/ovn4nfv.md +++ b/docs/ovn4nfv.md @@ -4,7 +4,7 @@ Intro to [ovn4nfv-k8s-plugin](https://github.com/opnfv/ovn4nfv-k8s-plugin) ## How to use it -* Enable ovn4nfv in `group_vars/k8s-cluster/k8s-cluster.yml` +* Enable ovn4nfv in `group_vars/k8s_cluster/k8s_cluster.yml` ```yml ... 
diff --git a/docs/setting-up-your-first-cluster.md b/docs/setting-up-your-first-cluster.md index 184d4bc81..65645f93d 100644 --- a/docs/setting-up-your-first-cluster.md +++ b/docs/setting-up-your-first-cluster.md @@ -225,7 +225,7 @@ worker-0, worker-1 and worker-2 are worker nodes. Also update the `ip` to the re remove the `access_ip`. The main configuration for the cluster is stored in -`inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml`. In this file we +`inventory/mycluster/group_vars/k8s_cluster/k8s_cluster.yml`. In this file we will update the `supplementary_addresses_in_ssl_keys` with a list of the IP addresses of the controller nodes. In that way we can access the kubernetes API server as an administrator from outside the VPC network. You @@ -234,7 +234,7 @@ The main configuration for the cluster is stored in Kubespray also offers to easily enable popular kubernetes add-ons. You can modify the -list of add-ons in `inventory/mycluster/group_vars/k8s-cluster/addons.yml`. +list of add-ons in `inventory/mycluster/group_vars/k8s_cluster/addons.yml`. Let's enable the metrics server as this is a crucial monitoring element for the kubernetes cluster, just change the 'false' to 'true' for `metrics_server_enabled`. diff --git a/docs/test_cases.md b/docs/test_cases.md index 2a8f5e920..738b7b196 100644 --- a/docs/test_cases.md +++ b/docs/test_cases.md @@ -2,11 +2,11 @@ There are four node layout types: `default`, `separate`, `ha`, and `scale`. -`default` is a non-HA two nodes setup with one separate `kube-node` +`default` is a non-HA two nodes setup with one separate `kube_node` and the `etcd` group merged with the `kube_control_plane`. `separate` layout is when there is only node of each type, which includes - a kube_control_plane, kube-node, and etcd cluster member. + a kube_control_plane, kube_node, and etcd cluster member. `ha` layout consists of two etcd nodes, two masters and a single worker node, with role intersection. diff --git a/docs/upgrades.md b/docs/upgrades.md index f0cde5e87..abbbc2f7a 100644 --- a/docs/upgrades.md +++ b/docs/upgrades.md @@ -68,9 +68,9 @@ If you want to manually control the upgrade procedure, you can use the variables For instance, if you're on v2.6.0, then check out v2.7.0, run the upgrade, check out the next tag, and run the next upgrade, etc. -Assuming you don't explicitly define a kubernetes version in your k8s-cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook +Assuming you don't explicitly define a kubernetes version in your k8s_cluster.yml, you simply check out the next tag and run the upgrade-cluster.yml playbook -* If you do define kubernetes version in your inventory (e.g. group_vars/k8s-cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3` +* If you do define kubernetes version in your inventory (e.g. group_vars/k8s_cluster.yml) then either make sure to update it before running upgrade-cluster, or specify the new version you're upgrading to: `ansible-playbook -i inventory/mycluster/hosts.ini -b upgrade-cluster.yml -e kube_version=v1.11.3` Otherwise, the upgrade will leave your cluster at the same k8s version defined in your inventory vars. 
@@ -232,7 +232,7 @@ Previous HEAD position was 6f97687d Release 2.8 robust san handling (#4478) HEAD is now at a4e65c7c Upgrade to Ansible >2.7.0 (#4471) ``` -:warning: IMPORTANT: Some of the variable formats changed in the k8s-cluster.yml between 2.8.5 and 2.9.0 :warning: +:warning: IMPORTANT: Some of the variable formats changed in the k8s_cluster.yml between 2.8.5 and 2.9.0 :warning: If you do not keep your inventory copy up to date, **your upgrade will fail** and your first master will be left non-functional until fixed and re-run. diff --git a/docs/vars.md b/docs/vars.md index 8e6218fad..be5c7d934 100644 --- a/docs/vars.md +++ b/docs/vars.md @@ -38,7 +38,7 @@ Some variables of note include: and access_ip are undefined * *loadbalancer_apiserver* - If defined, all hosts will connect to this address instead of localhost for kube_control_planes and kube_control_plane[0] for - kube-nodes. See more details in the + kube_nodes. See more details in the [HA guide](/docs/ha-mode.md). * *loadbalancer_apiserver_localhost* - makes all hosts to connect to the apiserver internally load balanced endpoint. Mutual exclusive to the @@ -59,14 +59,14 @@ following default cluster parameters: * *kube_pods_subnet* - Subnet for Pod IPs (default is 10.233.64.0/18). Must not overlap with kube_service_addresses. * *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining - bits in kube_pods_subnet dictates how many kube-nodes can be in cluster. Setting this > 25 will + bits in kube_pods_subnet dictates how many kube_nodes can be in cluster. Setting this > 25 will raise an assertion in playbooks if the `kubelet_max_pods` var also isn't adjusted accordingly (assertion not applicable to calico which doesn't use this as a hard limit, see [Calico IP block sizes](https://docs.projectcalico.org/reference/resources/ippool#block-sizes). * *enable_dual_stack_networks* - Setting this to true will provision both IPv4 and IPv6 networking for pods and services. * *kube_service_addresses_ipv6* - Subnet for cluster IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1000/116``). Must not overlap with ``kube_pods_subnet_ipv6``. * *kube_pods_subnet_ipv6* - Subnet for Pod IPv6 IPs (default is ``fd85:ee78:d8a6:8607::1:0000/112``). Must not overlap with ``kube_service_addresses_ipv6``. -* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube-nodes can be in cluster. +* *kube_network_node_prefix_ipv6* - Subnet allocated per-node for pod IPv6 IPs. Remaining bits in ``kube_pods_subnet_ipv6`` dictates how many kube_nodes can be in cluster. 
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3) * *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4) * *enable_coredns_k8s_external* - If enabled, it configures the [k8s_external plugin](https://coredns.io/plugins/k8s_external/) diff --git a/docs/weave.md b/docs/weave.md index 6d7e94956..30fa49444 100644 --- a/docs/weave.md +++ b/docs/weave.md @@ -11,7 +11,7 @@ Weave encryption is supported for all communication * To use Weave encryption, specify a strong password (if no password, no encryption) ```ShellSession -# In file ./inventory/sample/group_vars/k8s-cluster.yml +# In file ./inventory/sample/group_vars/k8s_cluster.yml weave_password: EnterPasswordHere ``` diff --git a/extra_playbooks/migrate_openstack_provider.yml b/extra_playbooks/migrate_openstack_provider.yml index 9d4adbaa9..2ce86d5c5 100644 --- a/extra_playbooks/migrate_openstack_provider.yml +++ b/extra_playbooks/migrate_openstack_provider.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-node:kube_control_plane +- hosts: kube_node:kube_control_plane tasks: - name: Remove old cloud provider config file: diff --git a/extra_playbooks/upgrade-only-k8s.yml b/extra_playbooks/upgrade-only-k8s.yml index 5bdbd012d..13ebcc4bd 100644 --- a/extra_playbooks/upgrade-only-k8s.yml +++ b/extra_playbooks/upgrade-only-k8s.yml @@ -16,7 +16,7 @@ - { role: kubespray-defaults} - { role: bastion-ssh-config, tags: ["localhost", "bastion"]} -- hosts: k8s-cluster:etcd:calico-rr +- hosts: k8s_cluster:etcd:calico_rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" gather_facts: false vars: @@ -27,7 +27,7 @@ - { role: kubespray-defaults} - { role: bootstrap-os, tags: bootstrap-os} -- hosts: k8s-cluster:etcd:calico-rr +- hosts: k8s_cluster:etcd:calico_rr any_errors_fatal: "{{ any_errors_fatal | default(true) }}" roles: - { role: kubespray-defaults} @@ -47,7 +47,7 @@ - { role: upgrade/post-upgrade, tags: post-upgrade } - name: Finally handle worker upgrades, based on given batch size - hosts: kube-node:!kube_control_plane + hosts: kube_node:!kube_control_plane any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: "{{ serial | default('20%') }}" roles: diff --git a/facts.yml b/facts.yml index e0281ee40..fae86eb30 100644 --- a/facts.yml +++ b/facts.yml @@ -1,6 +1,6 @@ --- - name: Gather facts - hosts: k8s-cluster:etcd:calico-rr + hosts: k8s_cluster:etcd:calico_rr gather_facts: False tasks: - name: Gather minimal facts diff --git a/inventory/local/hosts.ini b/inventory/local/hosts.ini index 7c3bc9559..4a6197e49 100644 --- a/inventory/local/hosts.ini +++ b/inventory/local/hosts.ini @@ -6,9 +6,9 @@ node1 [etcd] node1 -[kube-node] +[kube_node] node1 -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane diff --git a/inventory/sample/group_vars/k8s-cluster/addons.yml b/inventory/sample/group_vars/k8s_cluster/addons.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/addons.yml rename to inventory/sample/group_vars/k8s_cluster/addons.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml b/inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-cluster.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-cluster.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml similarity index 100% rename from 
inventory/sample/group_vars/k8s-cluster/k8s-net-calico.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-calico.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-canal.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-canal.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-cilium.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-cilium.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-flannel.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-flannel.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-kube-router.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-kube-router.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-macvlan.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-macvlan.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-macvlan.yml diff --git a/inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml b/inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml similarity index 100% rename from inventory/sample/group_vars/k8s-cluster/k8s-net-weave.yml rename to inventory/sample/group_vars/k8s_cluster/k8s-net-weave.yml diff --git a/inventory/sample/inventory.ini b/inventory/sample/inventory.ini index b450bc068..99a630922 100644 --- a/inventory/sample/inventory.ini +++ b/inventory/sample/inventory.ini @@ -23,16 +23,16 @@ # node2 # node3 -[kube-node] +[kube_node] # node2 # node3 # node4 # node5 # node6 -[calico-rr] +[calico_rr] -[k8s-cluster:children] +[k8s_cluster:children] kube_control_plane -kube-node -calico-rr +kube_node +calico_rr diff --git a/legacy_groups.yml b/legacy_groups.yml new file mode 100644 index 000000000..85e6f9ccb --- /dev/null +++ b/legacy_groups.yml @@ -0,0 +1,42 @@ +--- +# This is an inventory compatibility playbook to keep backwards compatibility with the old-style group names + +- name: Add kube-master nodes to kube_control_plane + hosts: kube-master + gather_facts: false + tasks: + - name: add nodes to kube_control_plane group + group_by: + key: 'kube_control_plane' + +- name: Add kube-node nodes to kube_node + hosts: kube-node + gather_facts: false + tasks: + - name: add nodes to kube_node group + group_by: + key: 'kube_node' + +- name: Add k8s-cluster nodes to k8s_cluster + hosts: k8s-cluster + gather_facts: false + tasks: + - name: add nodes to k8s_cluster group + group_by: + key: 'k8s_cluster' + +- name: Add calico-rr nodes to calico_rr + hosts: calico-rr + gather_facts: false + tasks: + - name: add nodes to calico_rr group + group_by: + key: 'calico_rr' + +- name: Add no-floating nodes to no_floating + hosts: no-floating + gather_facts: false + tasks: + - name: add nodes to no_floating group + group_by: + key: 'no_floating' diff --git a/recover-control-plane.yml b/recover-control-plane.yml index
c2b28d093..03d573d3b 100644 --- a/recover-control-plane.yml +++ b/recover-control-plane.yml @@ -2,14 +2,8 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml - hosts: bastion[0] gather_facts: False diff --git a/remove-node.yml b/remove-node.yml index 27c886035..ddf56614e 100644 --- a/remove-node.yml +++ b/remove-node.yml @@ -2,16 +2,10 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml -- hosts: "{{ node | default('etcd:k8s-cluster:calico-rr') }}" +- hosts: "{{ node | default('etcd:k8s_cluster:calico_rr') }}" gather_facts: no environment: "{{ proxy_disable_env }}" vars_prompt: @@ -34,7 +28,7 @@ - { role: bootstrap-os, tags: bootstrap-os } - { role: remove-node/pre-remove, tags: pre-remove } -- hosts: "{{ node | default('kube-node') }}" +- hosts: "{{ node | default('kube_node') }}" gather_facts: no environment: "{{ proxy_disable_env }}" roles: diff --git a/reset.yml b/reset.yml index 81f2389d4..80d8f158c 100644 --- a/reset.yml +++ b/reset.yml @@ -2,14 +2,8 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml - hosts: bastion[0] gather_facts: False @@ -21,7 +15,7 @@ - name: Gather facts import_playbook: facts.yml -- hosts: etcd:k8s-cluster:calico-rr +- hosts: etcd:k8s_cluster:calico_rr gather_facts: False vars_prompt: name: "reset_confirmation" diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 7d50050c8..066ee3e4a 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -548,7 +548,7 @@ downloads: tag: "{{ netcheck_server_image_tag }}" sha256: "{{ netcheck_server_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster netcheck_agent: enabled: "{{ deploy_netchecker }}" @@ -557,7 +557,7 @@ downloads: tag: "{{ netcheck_agent_image_tag }}" sha256: "{{ netcheck_agent_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster etcd: container: "{{ etcd_deployment_type != 'host' }}" @@ -588,7 +588,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster kubeadm: enabled: true @@ -601,7 +601,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster kubelet: enabled: true @@ -614,7 +614,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster kubectl: enabled: true @@ -640,7 +640,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster crun: file: true @@ -653,7 +653,7 @@ 
downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster kata_containers: enabled: "{{ kata_containers_enabled }}" @@ -666,7 +666,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster nerdctl: file: true @@ -679,7 +679,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster cilium: enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" @@ -688,7 +688,7 @@ downloads: tag: "{{ cilium_image_tag }}" sha256: "{{ cilium_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster cilium_init: enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" @@ -697,7 +697,7 @@ downloads: tag: "{{ cilium_init_image_tag }}" sha256: "{{ cilium_init_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster cilium_operator: enabled: "{{ kube_network_plugin == 'cilium' or cilium_deploy_additionally | default(false) | bool }}" @@ -706,7 +706,7 @@ downloads: tag: "{{ cilium_operator_image_tag }}" sha256: "{{ cilium_operator_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster multus: enabled: "{{ kube_network_plugin_multus }}" @@ -715,7 +715,7 @@ downloads: tag: "{{ multus_image_tag }}" sha256: "{{ multus_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster flannel: enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}" @@ -724,7 +724,7 @@ downloads: tag: "{{ flannel_image_tag }}" sha256: "{{ flannel_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster calicoctl: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" @@ -737,7 +737,7 @@ downloads: owner: "root" mode: "0755" groups: - - k8s-cluster + - k8s_cluster calico_node: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" @@ -746,7 +746,7 @@ downloads: tag: "{{ calico_node_image_tag }}" sha256: "{{ calico_node_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster calico_cni: enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}" @@ -755,7 +755,7 @@ downloads: tag: "{{ calico_cni_image_tag }}" sha256: "{{ calico_cni_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster calico_policy: enabled: "{{ enable_network_policy and kube_network_plugin in ['calico', 'canal'] }}" @@ -764,7 +764,7 @@ downloads: tag: "{{ calico_policy_image_tag }}" sha256: "{{ calico_policy_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster calico_typha: enabled: "{{ typha_enabled }}" @@ -773,7 +773,7 @@ downloads: tag: "{{ calico_typha_image_tag }}" sha256: "{{ calico_typha_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster calico_crds: file: true @@ -799,7 +799,7 @@ downloads: tag: "{{ weave_kube_image_tag }}" sha256: "{{ weave_kube_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster weave_npc: enabled: "{{ kube_network_plugin == 'weave' }}" @@ -808,7 +808,7 @@ downloads: tag: "{{ weave_npc_image_tag }}" sha256: "{{ weave_npc_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster ovn4nfv: enabled: "{{ kube_network_plugin == 'ovn4nfv' }}" @@ -817,7 +817,7 @@ downloads: tag: "{{ ovn4nfv_k8s_plugin_image_tag }}" sha256: "{{ ovn4nfv_k8s_plugin_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster kube_ovn: enabled: "{{ kube_network_plugin == 'kube-ovn' }}" @@ -826,7 +826,7 
@@ downloads: tag: "{{ kube_ovn_container_image_tag }}" sha256: "{{ kube_ovn_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster kube_router: enabled: "{{ kube_network_plugin == 'kube-router' }}" @@ -835,7 +835,7 @@ downloads: tag: "{{ kube_router_image_tag }}" sha256: "{{ kube_router_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster pod_infra: enabled: true @@ -844,7 +844,7 @@ downloads: tag: "{{ pod_infra_image_tag }}" sha256: "{{ pod_infra_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster install_socat: enabled: "{{ ansible_os_family in ['Flatcar Container Linux by Kinvolk'] }}" @@ -853,7 +853,7 @@ downloads: tag: "{{ install_socat_image_tag }}" sha256: "{{ install_socat_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster nginx: enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}" @@ -862,7 +862,7 @@ downloads: tag: "{{ nginx_image_tag }}" sha256: "{{ nginx_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node haproxy: enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}" @@ -871,7 +871,7 @@ downloads: tag: "{{ haproxy_image_tag }}" sha256: "{{ haproxy_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node coredns: enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" @@ -889,7 +889,7 @@ downloads: tag: "{{ nodelocaldns_image_tag }}" sha256: "{{ nodelocaldns_digest_checksum|default(None) }}" groups: - - k8s-cluster + - k8s_cluster dnsautoscaler: enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}" @@ -927,7 +927,7 @@ downloads: tag: "{{ registry_image_tag }}" sha256: "{{ registry_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node registry_proxy: enabled: "{{ registry_enabled }}" @@ -936,7 +936,7 @@ downloads: tag: "{{ registry_proxy_image_tag }}" sha256: "{{ registry_proxy_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node metrics_server: enabled: "{{ metrics_server_enabled }}" @@ -964,7 +964,7 @@ downloads: tag: "{{ local_volume_provisioner_image_tag }}" sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node cephfs_provisioner: enabled: "{{ cephfs_provisioner_enabled }}" @@ -973,7 +973,7 @@ downloads: tag: "{{ cephfs_provisioner_image_tag }}" sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node rbd_provisioner: enabled: "{{ rbd_provisioner_enabled }}" @@ -982,7 +982,7 @@ downloads: tag: "{{ rbd_provisioner_image_tag }}" sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node local_path_provisioner: enabled: "{{ local_path_provisioner_enabled }}" @@ -991,7 +991,7 @@ downloads: tag: "{{ local_path_provisioner_image_tag }}" sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node ingress_nginx_controller: enabled: "{{ ingress_nginx_enabled }}" @@ -1000,7 +1000,7 @@ downloads: tag: "{{ ingress_nginx_controller_image_tag }}" sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node ingress_ambassador_controller: enabled: "{{ ingress_ambassador_enabled }}" @@ -1009,7 +1009,7 @@ downloads: tag: "{{ ingress_ambassador_image_tag }}" sha256: "{{ ingress_ambassador_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node ingress_alb_controller: enabled: "{{ ingress_alb_enabled }}" @@ -1018,7 +1018,7 @@ 
downloads: tag: "{{ alb_ingress_image_tag }}" sha256: "{{ ingress_alb_controller_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node cert_manager_controller: enabled: "{{ cert_manager_enabled }}" @@ -1027,7 +1027,7 @@ downloads: tag: "{{ cert_manager_controller_image_tag }}" sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node cert_manager_cainjector: enabled: "{{ cert_manager_enabled }}" @@ -1036,7 +1036,7 @@ downloads: tag: "{{ cert_manager_cainjector_image_tag }}" sha256: "{{ cert_manager_cainjector_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node cert_manager_webhook: enabled: "{{ cert_manager_enabled }}" @@ -1045,7 +1045,7 @@ downloads: tag: "{{ cert_manager_webhook_image_tag }}" sha256: "{{ cert_manager_webhook_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node csi_attacher: enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" @@ -1054,7 +1054,7 @@ downloads: tag: "{{ csi_attacher_image_tag }}" sha256: "{{ csi_attacher_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node csi_provisioner: enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" @@ -1063,7 +1063,7 @@ downloads: tag: "{{ csi_provisioner_image_tag }}" sha256: "{{ csi_provisioner_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node csi_snapshotter: enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" @@ -1072,7 +1072,7 @@ downloads: tag: "{{ csi_snapshotter_image_tag }}" sha256: "{{ csi_snapshotter_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node snapshot_controller: enabled: "{{ cinder_csi_enabled }}" @@ -1081,7 +1081,7 @@ downloads: tag: "{{ snapshot_controller_image_tag }}" sha256: "{{ snapshot_controller_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node csi_resizer: enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" @@ -1090,7 +1090,7 @@ downloads: tag: "{{ csi_resizer_image_tag }}" sha256: "{{ csi_resizer_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node csi_node_driver_registrar: enabled: "{{ cinder_csi_enabled or aws_ebs_csi_enabled }}" @@ -1099,7 +1099,7 @@ downloads: tag: "{{ csi_node_driver_registrar_image_tag }}" sha256: "{{ csi_node_driver_registrar_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node cinder_csi_plugin: enabled: "{{ cinder_csi_enabled }}" @@ -1108,7 +1108,7 @@ downloads: tag: "{{ cinder_csi_plugin_image_tag }}" sha256: "{{ cinder_csi_plugin_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node aws_ebs_csi_plugin: enabled: "{{ aws_ebs_csi_enabled }}" @@ -1117,7 +1117,7 @@ downloads: tag: "{{ aws_ebs_csi_plugin_image_tag }}" sha256: "{{ aws_ebs_csi_plugin_digest_checksum|default(None) }}" groups: - - kube-node + - kube_node dashboard: enabled: "{{ dashboard_enabled }}" diff --git a/roles/download/tasks/prep_kubeadm_images.yml b/roles/download/tasks/prep_kubeadm_images.yml index fa829e8f0..c520a9416 100644 --- a/roles/download/tasks/prep_kubeadm_images.yml +++ b/roles/download/tasks/prep_kubeadm_images.yml @@ -55,7 +55,7 @@ container: true repo: "{{ item | regex_replace('^(.*):.*$','\\1') }}" tag: "{{ item | regex_replace('^.*:(.*)$','\\1') }}" - groups: k8s-cluster + groups: k8s_cluster loop: "{{ kubeadm_images_list | flatten(levels=1) }}" register: kubeadm_images_cooked run_once: true diff --git a/roles/etcd/defaults/main.yml b/roles/etcd/defaults/main.yml index 6415a5618..c11758e95 100644 --- a/roles/etcd/defaults/main.yml +++ 
b/roles/etcd/defaults/main.yml @@ -55,7 +55,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif % etcd_blkio_weight: 1000 -etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}" +etcd_node_cert_hosts: "{{ groups['k8s_cluster'] | union(groups.get('calico_rr', [])) }}" etcd_compaction_retention: "8" diff --git a/roles/etcd/tasks/check_certs.yml b/roles/etcd/tasks/check_certs.yml index 611026b48..ed0580b55 100644 --- a/roles/etcd/tasks/check_certs.yml +++ b/roles/etcd/tasks/check_certs.yml @@ -33,8 +33,8 @@ stat: path: "{{ etcd_cert_dir }}/{{ item }}" register: etcd_node_certs - when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or - inventory_hostname in groups['k8s-cluster']) + when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or + inventory_hostname in groups['k8s_cluster']) with_items: - ca.pem - node-{{ inventory_hostname }}.pem @@ -56,7 +56,7 @@ '{{ etcd_cert_dir }}/member-{{ host }}.pem', '{{ etcd_cert_dir }}/member-{{ host }}-key.pem', {% endfor %} - {% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort %} + {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort %} {% for host in k8s_nodes %} '{{ etcd_cert_dir }}/node-{{ host }}.pem', '{{ etcd_cert_dir }}/node-{{ host }}-key.pem' @@ -89,7 +89,7 @@ set_fact: gen_node_certs: |- { - {% set k8s_nodes = groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort -%} + {% set k8s_nodes = groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort -%} {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %} {% for host in k8s_nodes -%} {% set host_cert = "%s/node-%s.pem"|format(etcd_cert_dir, host) %} @@ -125,8 +125,8 @@ set_fact: kubernetes_host_requires_sync: true when: - - (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or - inventory_hostname in groups['k8s-cluster']) and + - (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or + inventory_hostname in groups['k8s_cluster']) and inventory_hostname not in groups['etcd'] - (not etcd_node_certs.results[0].stat.exists|default(false)) or (not etcd_node_certs.results[1].stat.exists|default(false)) or diff --git a/roles/etcd/tasks/gen_certs_script.yml b/roles/etcd/tasks/gen_certs_script.yml index 893e61c19..1f438f986 100644 --- a/roles/etcd/tasks/gen_certs_script.yml +++ b/roles/etcd/tasks/gen_certs_script.yml @@ -59,7 +59,7 @@ {{ m }} {% endif %} {% endfor %}" - - HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %} + - HOSTS: "{% for h in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %} {% if gen_node_certs[h] %} {{ h }} {% endif %} @@ -109,7 +109,7 @@ src: "{{ item }}" register: etcd_master_node_certs with_items: - - "[{% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %} + - "[{% for node in (groups['k8s_cluster'] + groups['calico_rr']|default([]))|unique %} '{{ etcd_cert_dir }}/node-{{ node }}.pem', '{{ etcd_cert_dir }}/node-{{ node }}-key.pem', {% endfor %}]" @@ -144,8 +144,8 @@ - name: "Check_certs | Set 'sync_certs' to true on nodes" set_fact: sync_certs: true - when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or - inventory_hostname in groups['k8s-cluster']) and + when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or + inventory_hostname in groups['k8s_cluster']) and 
inventory_hostname not in groups['etcd'] with_items: - "{{ my_etcd_node_certs }}" @@ -159,8 +159,8 @@ register: etcd_node_certs check_mode: no delegate_to: "{{ groups['etcd'][0] }}" - when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or - inventory_hostname in groups['k8s-cluster']) and + when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or + inventory_hostname in groups['k8s_cluster']) and sync_certs|default(false) and inventory_hostname not in groups['etcd'] - name: Gen_certs | Copy certs on nodes @@ -170,8 +170,8 @@ no_log: true changed_when: false check_mode: no - when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or - inventory_hostname in groups['k8s-cluster']) and + when: (('calico_rr' in groups and inventory_hostname in groups['calico_rr']) or + inventory_hostname in groups['k8s_cluster']) and sync_certs|default(false) and inventory_hostname not in groups['etcd'] notify: set etcd_secret_changed diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml index 966c555d5..98890e238 100644 --- a/roles/etcd/tasks/main.yml +++ b/roles/etcd/tasks/main.yml @@ -20,7 +20,7 @@ register: "etcd_client_cert_serial_result" changed_when: false when: - - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort + - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort tags: - master - network @@ -29,7 +29,7 @@ set_fact: etcd_client_cert_serial: "{{ etcd_client_cert_serial_result.stdout.split('=')[1] }}" when: - - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort + - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort tags: - master - network diff --git a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml index 47ba0a1d0..a62a9db39 100644 --- a/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml +++ b/roles/kubernetes-apps/csi_driver/cinder/tasks/main.yml @@ -5,11 +5,11 @@ - name: Cinder CSI Driver | Write cacert file include_tasks: cinder-write-cacert.yml run_once: true - loop: "{{ groups['k8s-cluster'] }}" + loop: "{{ groups['k8s_cluster'] }}" loop_control: loop_var: delegate_host_to_write_cacert when: - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - cinder_cacert is defined - cinder_cacert | length > 0 tags: cinder-csi-driver diff --git a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml index 7c89fdbdf..dd3528094 100644 --- a/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml +++ b/roles/kubernetes-apps/external_cloud_controller/openstack/tasks/main.yml @@ -5,11 +5,11 @@ - name: External OpenStack Cloud Controller | Write cacert file include_tasks: openstack-write-cacert.yml run_once: true - loop: "{{ groups['k8s-cluster'] }}" + loop: "{{ groups['k8s_cluster'] }}" loop_control: loop_var: delegate_host_to_write_cacert when: - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - external_openstack_cacert is defined - external_openstack_cacert | length > 0 tags: external-openstack diff --git a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml index 
88a178825..404aee389 100644 --- a/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml +++ b/roles/kubernetes-apps/external_provisioner/local_volume_provisioner/tasks/main.yml @@ -4,7 +4,7 @@ include_tasks: basedirs.yml loop_control: loop_var: delegate_host_base_dir - loop: "{{ groups['k8s-cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}" + loop: "{{ groups['k8s_cluster'] | product(local_volume_provisioner_storage_classes.keys()) | list }}" - name: Local Volume Provisioner | Create addon dir file: diff --git a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md index 47969d5f4..4d7c957c6 100644 --- a/roles/kubernetes-apps/ingress_controller/cert_manager/README.md +++ b/roles/kubernetes-apps/ingress_controller/cert_manager/README.md @@ -33,7 +33,7 @@ LS0tLS1CRUdJTiBSU0Eg... For further information, read the official [Cert-Manager CA Configuration](https://cert-manager.io/docs/configuration/ca/) doc. -Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and setting `cert_manager_enabled` to true. +Once the base64 encoded values have been added to `templates\secret-cert-manager.yml.j2`, cert-manager can now be enabled by editing your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and setting `cert_manager_enabled` to true. ```ini # Cert manager deployment @@ -46,7 +46,7 @@ If you don't have a TLS Root CA certificate and key available, you can create th A common use-case for cert-manager is requesting TLS signed certificates to secure your ingress resources. This can be done by simply adding annotations to your Ingress resources and cert-manager will facilitate creating the Certificate resource for you. A small sub-component of cert-manager, ingress-shim, is responsible for this. -To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s-cluster\addons.yml` and set `ingress_nginx_enabled` to true. +To enable the Nginx Ingress controller as part of your Kubespray deployment, simply edit your K8s cluster addons inventory e.g. `inventory\sample\group_vars\k8s_cluster\addons.yml` and set `ingress_nginx_enabled` to true. ```ini # Nginx ingress controller deployment diff --git a/roles/kubernetes-apps/metallb/README.md b/roles/kubernetes-apps/metallb/README.md index a898d096c..1456a6e8a 100644 --- a/roles/kubernetes-apps/metallb/README.md +++ b/roles/kubernetes-apps/metallb/README.md @@ -11,7 +11,7 @@ It deploys MetalLB into Kubernetes and sets up a layer 2 or BGP load-balancer. In the default, MetalLB is not deployed into your Kubernetes cluster. You can override the defaults by copying the contents of roles/kubernetes-apps/metallb/defaults/main.yml -to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/groups_vars/k8s-cluster/addons.yml +to somewhere in inventory/mycluster/group_vars such as inventory/mycluster/group_vars/k8s_cluster/addons.yml and updating metallb_enabled option to `true`. In addition you need to update metallb_ip_range option on the addons.yml at least for suiting your network environment, because MetalLB allocates external IP addresses from this metallb_ip_range option.
diff --git a/roles/kubernetes-apps/metrics_server/tasks/main.yml b/roles/kubernetes-apps/metrics_server/tasks/main.yml index c3be4b830..fdc9fc1e9 100644 --- a/roles/kubernetes-apps/metrics_server/tasks/main.yml +++ b/roles/kubernetes-apps/metrics_server/tasks/main.yml @@ -2,7 +2,7 @@ # If all masters have node role, there are no tainted master and toleration should not be specified. - name: Check all masters are node or not set_fact: - masters_are_not_tainted: "{{ groups['kube-node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" + masters_are_not_tainted: "{{ groups['kube_node'] | intersect(groups['kube_control_plane']) == groups['kube_control_plane'] }}" - name: Metrics Server | Delete addon dir file: diff --git a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml index 2c60fa7ee..b362a2a49 100644 --- a/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml +++ b/roles/kubernetes/control-plane/tasks/kubeadm-setup.yml @@ -192,5 +192,5 @@ with_items: - "node-role.kubernetes.io/master:NoSchedule-" - "node-role.kubernetes.io/control-plane:NoSchedule-" - when: inventory_hostname in groups['kube-node'] + when: inventory_hostname in groups['kube_node'] failed_when: false diff --git a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 index 2be6e860c..11c3e714b 100644 --- a/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 +++ b/roles/kubernetes/control-plane/templates/kubeadm-config.v1beta2.yaml.j2 @@ -16,7 +16,7 @@ nodeRegistration: {% if kube_override_hostname|default('') %} name: {{ kube_override_hostname }} {% endif %} -{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube-node'] %} +{% if inventory_hostname in groups['kube_control_plane'] and inventory_hostname not in groups['kube_node'] %} taints: - effect: NoSchedule key: node-role.kubernetes.io/master diff --git a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml index 787613e60..c87b840c1 100644 --- a/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml +++ b/roles/kubernetes/kubeadm/tasks/kubeadm_etcd_node.yml @@ -50,7 +50,7 @@ register: "etcd_client_cert_serial_result" changed_when: false when: - - inventory_hostname in groups['k8s-cluster']|union(groups['calico-rr']|default([]))|unique|sort + - inventory_hostname in groups['k8s_cluster']|union(groups['calico_rr']|default([]))|unique|sort tags: - network diff --git a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2 b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2 index c92569ec1..143a731ed 100644 --- a/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2 +++ b/roles/kubernetes/kubeadm/templates/kubeadm-client.conf.v1beta2.j2 @@ -21,7 +21,7 @@ caCertPath: {{ kube_cert_dir }}/ca.crt nodeRegistration: name: {{ kube_override_hostname }} criSocket: {{ cri_socket }} -{% if 'calico-rr' in group_names and 'kube-node' not in group_names %} +{% if 'calico_rr' in group_names and 'kube_node' not in group_names %} taints: - effect: NoSchedule key: node-role.kubernetes.io/calico-rr diff --git a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 index 868d1bc93..c11af1184 100644 --- a/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 +++ 
b/roles/kubernetes/node/templates/kubelet-config.v1beta1.yaml.j2 @@ -81,7 +81,7 @@ resolvConf: "{{ kube_resolv_conf }}" {% if kubelet_config_extra_args %} {{ kubelet_config_extra_args | to_nice_yaml(indent=2) }} {% endif %} -{% if inventory_hostname in groups['kube-node'] and kubelet_node_config_extra_args %} +{% if inventory_hostname in groups['kube_node'] and kubelet_node_config_extra_args %} {{ kubelet_node_config_extra_args | to_nice_yaml(indent=2) }} {% endif %} {% if tls_min_version is defined %} diff --git a/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 b/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 index 68c04fd36..3ca1ffdd7 100644 --- a/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 +++ b/roles/kubernetes/node/templates/kubelet.env.v1beta1.j2 @@ -34,7 +34,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}" {% endif %} {% endif %} -KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}" +KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube_node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}" {% if kubelet_flexvolumes_plugins_dir is defined %} KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}" {% endif %} diff --git a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml index a232694e9..720fa55b8 100644 --- a/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml +++ b/roles/kubernetes/preinstall/tasks/0020-verify-settings.yml @@ -1,10 +1,10 @@ --- -- name: Stop if either kube_control_plane or kube-node group is empty +- name: Stop if either kube_control_plane or kube_node group is empty assert: that: "groups.get('{{ item }}')" with_items: - kube_control_plane - - kube-node + - kube_node run_once: true when: not ignore_assert_errors @@ -86,7 +86,7 @@ that: ansible_memtotal_mb >= minimal_node_memory_mb when: - not ignore_assert_errors - - inventory_hostname in groups['kube-node'] + - inventory_hostname in groups['kube_node'] # This assertion will fail on the safe side: One can indeed schedule more pods # on a node than the CIDR-range has space for when additional pods use the host @@ -99,7 +99,7 @@ msg: "Do not schedule more pods on a node than inet addresses are available." 
when: - not ignore_assert_errors - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - kube_network_node_prefix is defined - kube_network_plugin != 'calico' @@ -207,14 +207,14 @@ - inventory_hostname == groups['kube_control_plane'][0] run_once: yes -- name: "Check that calico_rr nodes are in k8s-cluster group" +- name: "Check that calico_rr nodes are in k8s_cluster group" assert: that: - - '"k8s-cluster" in group_names' - msg: "calico-rr must be a child group of k8s-cluster group" + - '"k8s_cluster" in group_names' + msg: "calico_rr must be a child group of k8s_cluster group" when: - kube_network_plugin == 'calico' - - '"calico-rr" in group_names' + - '"calico_rr" in group_names' - name: "Check that kube_service_addresses is a network range" assert: diff --git a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml index 402040674..f184670ab 100644 --- a/roles/kubernetes/preinstall/tasks/0050-create_directories.yml +++ b/roles/kubernetes/preinstall/tasks/0050-create_directories.yml @@ -4,7 +4,7 @@ path: "{{ item }}" state: directory owner: kube - when: inventory_hostname in groups['k8s-cluster'] + when: inventory_hostname in groups['k8s_cluster'] become: true tags: - kubelet @@ -28,7 +28,7 @@ path: "{{ item }}" state: directory owner: root - when: inventory_hostname in groups['k8s-cluster'] + when: inventory_hostname in groups['k8s_cluster'] become: true tags: - kubelet @@ -51,7 +51,7 @@ get_mime: no register: kube_cert_compat_dir_check when: - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - kube_cert_dir != kube_cert_compat_dir - name: Create kubernetes kubeadm compat cert dir (kubernetes/kubeadm issue 1498) @@ -60,7 +60,7 @@ dest: "{{ kube_cert_compat_dir }}" state: link when: - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - kube_cert_dir != kube_cert_compat_dir - not kube_cert_compat_dir_check.stat.exists @@ -75,7 +75,7 @@ - "/var/lib/calico" when: - kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"] - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] tags: - network - cilium @@ -96,7 +96,7 @@ mode: "{{ local_volume_provisioner_directory_mode }}" with_items: "{{ local_volume_provisioner_storage_classes.keys() | list }}" when: - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - local_volume_provisioner_enabled tags: - persistent_volumes diff --git a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml index b011fd57b..95bc711dc 100644 --- a/roles/kubernetes/preinstall/tasks/0090-etchosts.yml +++ b/roles/kubernetes/preinstall/tasks/0090-etchosts.yml @@ -2,7 +2,7 @@ - name: Hosts | create list from inventory set_fact: etc_hosts_inventory_block: |- - {% for item in (groups['k8s-cluster'] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%} + {% for item in (groups['k8s_cluster'] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or 'ansible_default_ipv4' in hostvars[item] -%} {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }} {%- if ('ansible_hostname' in hostvars[item] and item != 
hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }}.{{ dns_domain }} {{ item }} diff --git a/roles/kubernetes/tokens/tasks/gen_tokens.yml b/roles/kubernetes/tokens/tasks/gen_tokens.yml index 40d4910d2..aa1cf214a 100644 --- a/roles/kubernetes/tokens/tasks/gen_tokens.yml +++ b/roles/kubernetes/tokens/tasks/gen_tokens.yml @@ -27,7 +27,7 @@ TOKEN_DIR: "{{ kube_token_dir }}" with_nested: - [ 'system:kubelet' ] - - "{{ groups['kube-node'] }}" + - "{{ groups['kube_node'] }}" register: gentoken_node changed_when: "'Added' in gentoken_node.stdout" run_once: yes diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml index 6ca0b9844..edafbe567 100644 --- a/roles/kubespray-defaults/defaults/main.yaml +++ b/roles/kubespray-defaults/defaults/main.yaml @@ -147,8 +147,8 @@ kube_log_level: 2 kube_network_plugin: calico kube_network_plugin_multus: false -# Determines if calico-rr group exists -peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}" +# Determines if calico_rr group exists +peer_with_calico_rr: "{{ 'calico_rr' in groups and groups['calico_rr']|length > 0 }}" # Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore) calico_datastore: "kdd" diff --git a/roles/kubespray-defaults/tasks/fallback_ips.yml b/roles/kubespray-defaults/tasks/fallback_ips.yml index 291bd3fcc..acca31c0c 100644 --- a/roles/kubespray-defaults/tasks/fallback_ips.yml +++ b/roles/kubespray-defaults/tasks/fallback_ips.yml @@ -7,7 +7,7 @@ tags: always include_tasks: fallback_ips_gather.yml when: hostvars[delegate_host_to_gather_facts].ansible_default_ipv4 is not defined - loop: "{{ groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]) }}" + loop: "{{ groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]) }}" loop_control: loop_var: delegate_host_to_gather_facts run_once: yes @@ -16,7 +16,7 @@ set_fact: fallback_ips_base: | --- - {% for item in (groups['k8s-cluster']|default([]) + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique %} + {% for item in (groups['k8s_cluster']|default([]) + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique %} {% set found = hostvars[item].get('ansible_default_ipv4') %} {{ item }}: "{{ found.get('address', '127.0.0.1') }}" {% endfor %} diff --git a/roles/kubespray-defaults/tasks/no_proxy.yml b/roles/kubespray-defaults/tasks/no_proxy.yml index 984bb50a2..6e6a5c9bb 100644 --- a/roles/kubespray-defaults/tasks/no_proxy.yml +++ b/roles/kubespray-defaults/tasks/no_proxy.yml @@ -9,9 +9,9 @@ {%- if no_proxy_exclude_workers | default(false) -%} {% set cluster_or_master = 'kube_control_plane' %} {%- else -%} - {% set cluster_or_master = 'k8s-cluster' %} + {% set cluster_or_master = 'k8s_cluster' %} {%- endif -%} - {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico-rr']|default([]))|unique -%} + {%- for item in (groups[cluster_or_master] + groups['etcd']|default([]) + groups['calico_rr']|default([]))|unique -%} {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}, {%- if item != hostvars[item].get('ansible_hostname', '') -%} {{ hostvars[item]['ansible_hostname'] }}, diff --git a/roles/network_plugin/calico/tasks/install.yml b/roles/network_plugin/calico/tasks/install.yml index 3dabd56ca..fb202611f 100644 --- 
a/roles/network_plugin/calico/tasks/install.yml +++ b/roles/network_plugin/calico/tasks/install.yml @@ -193,7 +193,7 @@ nodeToNodeMeshEnabled: "false" when: - peer_with_router|default(false) or peer_with_calico_rr|default(false) - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] run_once: yes - name: Calico | Set up BGP Configuration @@ -264,7 +264,7 @@ until: output.rc == 0 delay: "{{ retry_stagger | random + 3 }}" with_items: - - "{{ groups['calico-rr'] | default([]) }}" + - "{{ groups['calico_rr'] | default([]) }}" when: - inventory_hostname == groups['kube_control_plane'][0] - peer_with_calico_rr|default(false) @@ -290,7 +290,7 @@ until: output.rc == 0 delay: "{{ retry_stagger | random + 3 }}" with_items: - - "{{ groups['calico-rr'] | default([]) }}" + - "{{ groups['calico_rr'] | default([]) }}" when: - inventory_hostname == groups['kube_control_plane'][0] - peer_with_calico_rr|default(false) @@ -368,9 +368,9 @@ delay: "{{ retry_stagger | random + 3 }}" when: - peer_with_router|default(false) - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] - local_as is defined - - groups['calico-rr'] | default([]) | length == 0 + - groups['calico_rr'] | default([]) | length == 0 - name: Calico | Configure peering with router(s) at node scope command: @@ -396,4 +396,4 @@ - "{{ peers|selectattr('scope','undefined')|list|default([]) | union(peers|selectattr('scope','defined')|selectattr('scope','equalto', 'node')|list|default([])) }}" when: - peer_with_router|default(false) - - inventory_hostname in groups['k8s-cluster'] + - inventory_hostname in groups['k8s_cluster'] diff --git a/roles/network_plugin/calico/templates/calico-config.yml.j2 b/roles/network_plugin/calico/templates/calico-config.yml.j2 index f13576ffc..b3645d2d6 100644 --- a/roles/network_plugin/calico/templates/calico-config.yml.j2 +++ b/roles/network_plugin/calico/templates/calico-config.yml.j2 @@ -22,6 +22,6 @@ data: cluster_type: "kubespray,bgp" calico_backend: "bird" {% endif %} -{% if inventory_hostname in groups['k8s-cluster'] and peer_with_router|default(false) %} +{% if inventory_hostname in groups['k8s_cluster'] and peer_with_router|default(false) %} as: "{{ local_as|default(global_as_num) }}" {% endif -%} diff --git a/roles/network_plugin/kube-router/tasks/annotate.yml b/roles/network_plugin/kube-router/tasks/annotate.yml index 6be517bc4..30190124d 100644 --- a/roles/network_plugin/kube-router/tasks/annotate.yml +++ b/roles/network_plugin/kube-router/tasks/annotate.yml @@ -6,16 +6,16 @@ delegate_to: "{{ groups['kube_control_plane'][0] }}" when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane'] -- name: kube-router | Add annotations on kube-node +- name: kube-router | Add annotations on kube_node command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_node }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node'] + when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node'] - name: kube-router | Add common annotations on all servers command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}" with_items: - "{{ kube_router_annotations_all }}" delegate_to: "{{ groups['kube_control_plane'][0] }}" - when: kube_router_annotations_all is defined and inventory_hostname 
in groups['k8s-cluster'] + when: kube_router_annotations_all is defined and inventory_hostname in groups['k8s_cluster'] diff --git a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 index 51b9ff51f..60400dd49 100644 --- a/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 +++ b/roles/network_plugin/macvlan/templates/centos-routes-macvlan.cfg.j2 @@ -1,4 +1,4 @@ -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} {% if hostvars[host]['access_ip'] is defined %} {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} diff --git a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 index ea96cb404..696eba501 100644 --- a/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 +++ b/roles/network_plugin/macvlan/templates/coreos-network-macvlan.cfg.j2 @@ -4,7 +4,7 @@ Name=mac0 [Network] Address={{ node_pod_cidr|ipaddr('net')|ipaddr(1)|ipaddr('address') }}/{{ node_pod_cidr|ipaddr('prefix') }} -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} {% if hostvars[host]['access_ip'] is defined %} {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} [Route] diff --git a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 index 0f2cbc15f..9edd6d157 100644 --- a/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 +++ b/roles/network_plugin/macvlan/templates/debian-network-macvlan.cfg.j2 @@ -5,7 +5,7 @@ iface mac0 inet static netmask {{ node_pod_cidr|ipaddr('netmask') }} broadcast {{ node_pod_cidr|ipaddr('broadcast') }} pre-up ip link add link {{ macvlan_interface }} mac0 type macvlan mode bridge -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} {% if hostvars[host]['access_ip'] is defined %} {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} post-up ip route add {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} @@ -15,7 +15,7 @@ iface mac0 inet static {% if enable_nat_default_gateway %} post-up iptables -t nat -I POSTROUTING -s {{ node_pod_cidr|ipaddr('net') }} -o {{ node_default_gateway_interface }} -j MASQUERADE {% endif %} -{% for host in groups['kube-node'] %} +{% for host in groups['kube_node'] %} {% if hostvars[host]['access_ip'] is defined %} {% if hostvars[host]['node_pod_cidr'] != node_pod_cidr %} post-down ip route del {{ hostvars[host]['node_pod_cidr'] }} via {{ hostvars[host]['access_ip'] }} diff --git a/roles/remove-node/pre-remove/tasks/main.yml b/roles/remove-node/pre-remove/tasks/main.yml index 541d1b441..2cafaeb7f 100644 --- a/roles/remove-node/pre-remove/tasks/main.yml +++ b/roles/remove-node/pre-remove/tasks/main.yml @@ -15,7 +15,7 @@ --grace-period {{ drain_grace_period }} --timeout {{ drain_timeout }} --delete-local-data {{ hostvars[item]['kube_override_hostname']|default(item) }} - loop: "{{ node.split(',') | default(groups['kube-node']) }}" + loop: "{{ node.split(',') | default(groups['kube_node']) }}" # ignore servers that are not nodes when: hostvars[item]['kube_override_hostname']|default(item) in nodes.stdout_lines register: result diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index d216cd5ce..735dd4c32 100644 --- a/roles/reset/tasks/main.yml +++ 
b/roles/reset/tasks/main.yml @@ -207,7 +207,7 @@ - name: Clear IPVS virtual server table command: "ipvsadm -C" when: - - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s-cluster'] + - kube_proxy_mode == 'ipvs' and inventory_hostname in groups['k8s_cluster'] - name: reset | check kube-ipvs0 network device stat: diff --git a/scale.yml b/scale.yml index caecfef70..5e218791a 100644 --- a/scale.yml +++ b/scale.yml @@ -2,14 +2,8 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml - hosts: bastion[0] gather_facts: False @@ -19,7 +13,7 @@ - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } - name: Bootstrap any new workers - hosts: kube-node + hosts: kube_node strategy: linear any_errors_fatal: "{{ any_errors_fatal | default(true) }}" gather_facts: false @@ -52,7 +46,7 @@ - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" } - name: Target only workers to get kubelet installed and checking in on any new nodes(engine) - hosts: kube-node + hosts: kube_node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -64,7 +58,7 @@ - { role: etcd, tags: etcd, etcd_cluster_setup: false, when: "not etcd_kubeadm_enabled|default(false)" } - name: Target only workers to get kubelet installed and checking in on any new nodes(node) - hosts: kube-node + hosts: kube_node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -95,7 +89,7 @@ when: kubeadm_certificate_key is not defined - name: Target only workers to get kubelet installed and checking in on any new nodes(network) - hosts: kube-node + hosts: kube_node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" diff --git a/tests/cloud_playbooks/delete-aws.yml b/tests/cloud_playbooks/delete-aws.yml index b72caf0ee..02f9b06c7 100644 --- a/tests/cloud_playbooks/delete-aws.yml +++ b/tests/cloud_playbooks/delete-aws.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-node +- hosts: kube_node become: False tasks: diff --git a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 index 4158788c9..1ead107a0 100644 --- a/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 +++ b/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2 @@ -11,9 +11,13 @@ instance-1 [kube-master] instance-1 +# TODO(cristicalin): Remove kube-node,k8s-cluster groups from this file after releasing v2.16. 
[kube-node] instance-2 +[kube_node] +instance-2 + [etcd] instance-3 {% elif mode is defined and mode in ["ha", "ha-scale"] %} @@ -28,6 +32,9 @@ instance-2 [kube-node] instance-3 +[kube_node] +instance-3 + [etcd] instance-1 instance-2 @@ -42,6 +49,9 @@ instance-1 [kube-node] instance-2 +[kube_node] +instance-2 + [etcd] instance-1 {% elif mode == "aio" %} @@ -54,6 +64,9 @@ instance-1 [kube-node] instance-1 +[kube_node] +instance-1 + [etcd] instance-1 {% elif mode == "ha-recover" %} @@ -68,6 +81,9 @@ instance-2 [kube-node] instance-3 +[kube_node] +instance-3 + [etcd] instance-3 instance-1 @@ -92,6 +108,9 @@ instance-2 [kube-node] instance-3 +[kube_node] +instance-3 + [etcd] instance-3 instance-1 @@ -111,6 +130,11 @@ kube-node kube-master calico-rr -[calico-rr] +[k8s_cluster:children] +kube_node +kube_master +calico_rr + +[calico_rr] [fake_hosts] diff --git a/tests/templates/inventory-aws.j2 b/tests/templates/inventory-aws.j2 index 538f1bdac..e3c5373f2 100644 --- a/tests/templates/inventory-aws.j2 +++ b/tests/templates/inventory-aws.j2 @@ -6,7 +6,7 @@ node3 ansible_ssh_host={{ec2.instances[2].public_ip}} ansible_ssh_user={{ssh_use node1 node2 -[kube-node] +[kube_node] node1 node2 node3 @@ -15,12 +15,12 @@ node3 node1 node2 -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane -calico-rr +calico_rr -[calico-rr] +[calico_rr] [broken_kube_control_plane] node2 diff --git a/tests/templates/inventory-do.j2 b/tests/templates/inventory-do.j2 index c24d40180..fb5436123 100644 --- a/tests/templates/inventory-do.j2 +++ b/tests/templates/inventory-do.j2 @@ -6,7 +6,7 @@ [kube_control_plane] {{droplets.results[0].droplet.name}} -[kube-node] +[kube_node] {{droplets.results[1].droplet.name}} [etcd] @@ -16,7 +16,7 @@ {{droplets.results[0].droplet.name}} {{droplets.results[1].droplet.name}} -[kube-node] +[kube_node] {{droplets.results[2].droplet.name}} [etcd] @@ -32,16 +32,16 @@ [kube_control_plane] {{droplets.results[0].droplet.name}} -[kube-node] +[kube_node] {{droplets.results[1].droplet.name}} [etcd] {{droplets.results[0].droplet.name}} {% endif %} -[calico-rr] +[calico_rr] -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane -calico-rr +calico_rr diff --git a/tests/templates/inventory-gce.j2 b/tests/templates/inventory-gce.j2 index e1e0bc451..33e9bbc73 100644 --- a/tests/templates/inventory-gce.j2 +++ b/tests/templates/inventory-gce.j2 @@ -12,7 +12,7 @@ [kube_control_plane] {{node1}} -[kube-node] +[kube_node] {{node2}} [etcd] @@ -23,7 +23,7 @@ {{node1}} {{node2}} -[kube-node] +[kube_node] {{node3}} [etcd] @@ -41,7 +41,7 @@ [kube_control_plane] {{node1}} -[kube-node] +[kube_node] {{node2}} [etcd] @@ -50,24 +50,24 @@ [kube_control_plane] {{node1}} -[kube-node] +[kube_node] {{node1}} [etcd] {{node1}} {% endif %} -[k8s-cluster:children] -kube-node +[k8s_cluster:children] +kube_node kube_control_plane -calico-rr +calico_rr -[calico-rr] +[calico_rr] {% if mode is defined and mode in ["scale", "separate-scale", "ha-scale"] %} [fake_hosts] fake_scale_host[1:200] -[kube-node:children] +[kube_node:children] fake_hosts {% endif %} diff --git a/tests/testcases/040_check-network-adv.yml b/tests/testcases/040_check-network-adv.yml index 174c9750c..18cf6daf1 100644 --- a/tests/testcases/040_check-network-adv.yml +++ b/tests/testcases/040_check-network-adv.yml @@ -1,5 +1,5 @@ --- -- hosts: kube-node +- hosts: kube_node tasks: - name: Test tunl0 routes shell: "set -o pipefail && ! 
/sbin/ip ro | grep '/26 via' | grep -v tunl0" @@ -9,7 +9,7 @@ - (ipip|default(true) or cloud_provider is defined) - kube_network_plugin|default('calico') == 'calico' -- hosts: k8s-cluster +- hosts: k8s_cluster vars: agent_report_interval: 10 netcheck_namespace: default @@ -44,7 +44,7 @@ args: executable: /bin/bash register: nca_pod - until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2 + until: nca_pod.stdout_lines|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 retries: 3 delay: 10 failed_when: false @@ -76,7 +76,7 @@ delay: "{{ agent_report_interval }}" until: agents.content|length > 0 and agents.content[0] == '{' and - agents.content|from_json|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2 + agents.content|from_json|length >= groups['k8s_cluster']|intersect(ansible_play_hosts)|length * 2 failed_when: false no_log: true diff --git a/upgrade-cluster.yml b/upgrade-cluster.yml index 6fd30537b..5b6d7b207 100644 --- a/upgrade-cluster.yml +++ b/upgrade-cluster.yml @@ -2,14 +2,8 @@ - name: Check ansible version import_playbook: ansible_version.yml -- name: Add kube-master nodes to kube_control_plane - # This is for old inventory which contains kube-master instead of kube_control_plane - hosts: kube-master - gather_facts: false - tasks: - - name: add nodes to kube_control_plane group - group_by: - key: 'kube_control_plane' +- name: Ensure compatibility with old groups + import_playbook: legacy_groups.yml - hosts: bastion[0] gather_facts: False @@ -18,7 +12,7 @@ - { role: kubespray-defaults } - { role: bastion-ssh-config, tags: ["localhost", "bastion"] } -- hosts: k8s-cluster:etcd:calico-rr +- hosts: k8s_cluster:etcd:calico_rr strategy: linear any_errors_fatal: "{{ any_errors_fatal | default(true) }}" gather_facts: false @@ -46,7 +40,7 @@ - { role: download, tags: download, when: "not skip_downloads and download_run_once and not download_localhost" } - name: Prepare nodes for upgrade - hosts: k8s-cluster:etcd:calico-rr + hosts: k8s_cluster:etcd:calico_rr gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -56,7 +50,7 @@ - { role: download, tags: download, when: "not skip_downloads" } - name: Upgrade container engine on non-cluster nodes - hosts: etcd:calico-rr:!k8s-cluster + hosts: etcd:calico_rr:!k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -78,7 +72,7 @@ etcd_events_cluster_setup: false when: not etcd_kubeadm_enabled | default(false) -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -110,7 +104,7 @@ - { role: upgrade/post-upgrade, tags: post-upgrade } - name: Upgrade calico and external cloud provider on all masters, calico-rrs, and nodes - hosts: kube_control_plane:calico-rr:kube-node + hosts: kube_control_plane:calico_rr:kube_node gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" serial: "{{ serial | default('20%') }}" @@ -123,7 +117,7 @@ - { role: kubernetes-apps/policy_controller, tags: policy-controller } - name: Finally handle worker upgrades, based on given batch size - hosts: kube-node:calico-rr:!kube_control_plane + hosts: kube_node:calico_rr:!kube_control_plane gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -145,7 
+139,7 @@ - { role: kubespray-defaults } - { role: win_nodes/kubernetes_patch, tags: ["master", "win_nodes"] } -- hosts: calico-rr +- hosts: calico_rr gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}" @@ -161,7 +155,7 @@ - { role: kubespray-defaults } - { role: kubernetes-apps, tags: apps } -- hosts: k8s-cluster +- hosts: k8s_cluster gather_facts: False any_errors_fatal: "{{ any_errors_fatal | default(true) }}" environment: "{{ proxy_disable_env }}"
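
Note for existing deployments: the group_by plays in legacy_groups.yml keep the old hyphenated names working, so an unchanged inventory still resolves to the new groups at runtime. For those who prefer to migrate their own inventory to the new names right away, a rough sketch follows; the `inventory/mycluster` layout and the `hosts.ini` file name are only examples, and a blanket substitution should be reviewed before running it, since it would also rewrite host names that happen to contain the old group strings.

```ShellSession
# Example migration of a user-maintained inventory.
# Paths are illustrative only: adjust inventory/mycluster and hosts.ini to your own layout.

# Rename the group_vars directory to match the new k8s_cluster group name
git mv inventory/mycluster/group_vars/k8s-cluster inventory/mycluster/group_vars/k8s_cluster

# Rewrite the old group names to their underscore replacements in the inventory file
sed -i \
  -e 's/kube-master/kube_control_plane/g' \
  -e 's/kube-node/kube_node/g' \
  -e 's/k8s-cluster/k8s_cluster/g' \
  -e 's/calico-rr/calico_rr/g' \
  -e 's/no-floating/no_floating/g' \
  inventory/mycluster/hosts.ini
```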