c12s-kubespray/tests/cloud_playbooks/roles/packet-ci/templates/inventory.j2
qvicksilver ac2135e450
Fix recover-control-plane to work with etcd 3.3.x and add CI (#5500)
* Fix recover-control-plane to work with etcd 3.3.x and add CI

* Set default values for testcase

* Add actual test jobs

* Attempt to satisfy gitlab ci linter

* Fix ansible targets

* Set etcd_member_name as stated in the docs...

* Recovering from 0 masters is not supported yet

* Add other master to broken_kube-master group as well

* Increase number of retries to see if etcd needs more time to heal

* Make number of retries for ETCD loops configurable, increase it for recovery CI and document it
2020-02-11 01:38:01 -08:00

97 lines
1.2 KiB
Django/Jinja

{# Jinja2 template rendering an Ansible INI inventory for the Packet CI
   jobs. The group layout (kube-master / kube-node / etcd / broken_*) is
   selected by the `mode` test variable; `instance-N` is the Nth entry of
   `vms.results` (1-based via loop.index).
   NOTE(review): each result's `stdout` is assumed to hold an address
   usable as ansible_ssh_host — confirm against the task that registers
   `vms`. Comments use `-#}` so they add no bytes to the rendered
   inventory regardless of trim_blocks settings. -#}
[all]
{% for instance in vms.results %}
instance-{{ loop.index }} ansible_ssh_host={{instance.stdout}}
{% endfor %}
{# separate / separate-scale: one dedicated VM per role -#}
{% if mode is defined and mode in ["separate", "separate-scale"] %}
[kube-master]
instance-1
[kube-node]
instance-2
[etcd]
instance-3
{# ha / ha-scale: two masters, one worker, etcd spread over all three VMs -#}
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube-master]
instance-1
instance-2
[kube-node]
instance-3
[etcd]
instance-1
instance-2
instance-3
{# default: single combined master+etcd VM plus one worker -#}
{% elif mode == "default" %}
[kube-master]
instance-1
[kube-node]
instance-2
[etcd]
instance-1
{# aio ("all-in-one"): every role, including vault, on a single VM -#}
{% elif mode == "aio" %}
[kube-master]
instance-1
[kube-node]
instance-1
[etcd]
instance-1
[vault]
instance-1
{# ha-recover: one of three etcd members marked broken, so quorum (2/3)
   survives; the broken_* groups tell the recover-control-plane playbook
   which hosts to replace.
   NOTE(review): etcd_member_name appears to track the host's 1-based
   position in the [etcd] group above (instance-2 is third → etcd3) —
   confirm against the recover-control-plane documentation. -#}
{% elif mode == "ha-recover" %}
[kube-master]
instance-1
instance-2
[kube-node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube-master]
instance-2
[broken_etcd]
instance-2 etcd_member_name=etcd3
{# ha-recover-noquorum: two of three etcd members broken, so quorum is
   already lost before recovery starts. Member names again follow the
   [etcd] ordering (instance-1 second → etcd2, instance-2 third → etcd3). -#}
{% elif mode == "ha-recover-noquorum" %}
[kube-master]
instance-3
instance-1
instance-2
[kube-node]
instance-3
[etcd]
instance-3
instance-1
instance-2
[broken_kube-master]
instance-1
instance-2
[broken_etcd]
instance-1 etcd_member_name=etcd2
instance-2 etcd_member_name=etcd3
{% endif %}
{# Groups below are emitted for every mode; calico-rr and fake_hosts are
   intentionally left empty. -#}
[k8s-cluster:children]
kube-node
kube-master
calico-rr
[calico-rr]
[fake_hosts]