From 18a42e4b38939017a2e34dbd1da042fc7ef10f15 Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Wed, 24 May 2017 15:49:21 -0400
Subject: [PATCH] add scale.yml to do minimum needed for a node bootstrap

---
 docs/getting-started.md | 12 ++++++++++++
 scale.yml               | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 46 insertions(+)
 create mode 100644 scale.yml

diff --git a/docs/getting-started.md b/docs/getting-started.md
index 5c61ef764..6e323d9cd 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -55,3 +55,15 @@ ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
   --private-key=~/.ssh/private_key
 ```
 See more details in the [ansible guide](ansible.md).
+
+Adding nodes
+--------------------------
+
+You may want to add worker nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
+
+- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
+- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
+```
+ansible-playbook -i my_inventory/inventory.cfg scale.yml -b -v \
+  --private-key=~/.ssh/private_key
+```
\ No newline at end of file
diff --git a/scale.yml b/scale.yml
new file mode 100644
index 000000000..02e79aa37
--- /dev/null
+++ b/scale.yml
@@ -0,0 +1,34 @@
+---
+
+##Bootstrap any new workers
+- hosts: kube-node
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  gather_facts: false
+  vars:
+    ansible_ssh_pipelining: false
+  roles:
+    - { role: kargo-defaults}
+    - { role: bootstrap-os, tags: bootstrap-os}
+
+##We still have to gather facts about our masters and etcd nodes
+- hosts: k8s-cluster:etcd:calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  vars:
+    ansible_ssh_pipelining: true
+  gather_facts: true
+
+##Target only workers to get kubelet installed and checking in on any new nodes
+- hosts: kube-node
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults}
+    - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
+    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: docker, tags: docker }
+    - role: rkt
+      tags: rkt
+      when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+    - { role: vault, tags: vault, when: "cert_management == 'vault'"}
+    - { role: kubernetes/node, tags: node }
+    - { role: network_plugin, tags: network }
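
Note: the doc change above assumes the new worker already appears under the kube-node group before `scale.yml` is run. As a minimal sketch only (not part of this patch, and using hypothetical hostnames), an INI inventory with one freshly added worker might look like this, where only node4 is new:

```
[kube-master]
node1

[etcd]
node1

[kube-node]
node2
node3
# newly added worker that scale.yml will bootstrap
node4

[k8s-cluster:children]
kube-node
kube-master
```

With the inventory updated, the `scale.yml` invocation shown in the patch targets only the kube-node hosts for bootstrap and kubelet installation, while still gathering facts from the masters and etcd members as the second play requires.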
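As a workflow suggestion rather than anything this patch enforces, you can verify that the new worker's kubelet is checking in with the masters once the play finishes:

```
# run from a master (or any host with a working kubeconfig);
# the new node should be listed and eventually report Ready
kubectl get nodes
```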