Merge pull request #1064 from kubernetes-incubator/rework_vars

Add default var role
This commit is contained in:
Bogdan Dobrelya 2017-02-23 21:48:23 +01:00 committed by GitHub
commit 797a9efb12
12 changed files with 242 additions and 181 deletions

View file

@ -2,8 +2,8 @@
- hosts: localhost - hosts: localhost
gather_facts: False gather_facts: False
roles: roles:
- bastion-ssh-config - { role: kargo-defaults}
tags: [localhost, bastion] - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
@ -13,9 +13,8 @@
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- bootstrap-os - { role: kargo-defaults}
tags: - { role: bootstrap-os, tags: bootstrap-os}
- bootstrap-os
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
@ -26,6 +25,7 @@
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker }
@ -36,47 +36,56 @@
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
- hosts: etcd:!k8s-cluster - hosts: etcd:!k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: etcd, tags: etcd } - { role: etcd, tags: etcd }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: etcd, tags: etcd } - { role: etcd, tags: etcd }
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: vault, tags: vault, when: "cert_management == 'vault'"} - { role: vault, tags: vault, when: "cert_management == 'vault'"}
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
- hosts: kube-master - hosts: kube-master
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
- { role: kubernetes-apps/network_plugin, tags: network } - { role: kubernetes-apps/network_plugin, tags: network }
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: network_plugin/calico/rr, tags: network } - { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
- hosts: kube-master[0] - hosts: kube-master[0]
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kubernetes-apps, tags: apps } - { role: kubernetes-apps, tags: apps }

View file

@ -1,33 +0,0 @@
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
kube_service_addresses: 10.233.0.0/18
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

View file

@ -1,38 +0,0 @@
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# Settings for containerized control plane (etcd/secrets)
etcd_deployment_type: docker
cert_management: script
vault_deployment_type: docker
kube_service_addresses: 10.233.0.0/18
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"

View file

@ -1,113 +1,115 @@
# Valid bootstrap options (required): ubuntu, coreos, centos, none # # Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none # bootstrap_os: none
# Directory where the binaries will be installed # # Directory where the binaries will be installed
bin_dir: /usr/local/bin # bin_dir: /usr/local/bin
# Kubernetes configuration dirs and system namespace. # # Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes # # Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernets. # # the kubernetes normally puts in /srv/kubernets.
# This puts them in a sane location and namespace. # # This puts them in a sane location and namespace.
# Editting those values will almost surely break something. # # Editting those values will almost surely break something.
kube_config_dir: /etc/kubernetes # kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" # kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests" # kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system # system_namespace: kube-system
# Logging directory (sysvinit systems) # # Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes" # kube_log_dir: "/var/log/kubernetes"
# This is where all the cert scripts and certs will be located # # This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl" # kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored # # This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens" # kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file # # This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users" # kube_users_dir: "{{ kube_config_dir }}/users"
## Change this to use another Kubernetes version, e.g. a current beta release # kube_api_anonymous_auth: false
kube_version: v1.5.3
# Where the binaries will be downloaded. # ## Change this to use another Kubernetes version, e.g. a current beta release
# Note: ensure that you've enough disk space (about 1G) # kube_version: v1.5.3
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# This is the group that the cert creation scripts chgrp the # # Where the binaries will be downloaded.
# cert files to. Not really changable... # # Note: ensure that you've enough disk space (about 1G)
kube_cert_group: kube-cert # local_release_dir: "/tmp/releases"
# # Random shifts for retrying failed ops like pushing/downloading
# retry_stagger: 5
# Cluster Loglevel configuration # # This is the group that the cert creation scripts chgrp the
kube_log_level: 2 # # cert files to. Not really changable...
# kube_cert_group: kube-cert
# Users to create for basic auth in Kubernetes API via HTTP # # Cluster Loglevel configuration
kube_api_pwd: "changeme" # kube_log_level: 2
kube_users:
kube:
pass: "{{kube_api_pwd}}"
role: admin
root:
pass: "{{kube_api_pwd}}"
role: admin
# Choose network plugin (calico, weave or flannel) # # Users to create for basic auth in Kubernetes API via HTTP
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing # kube_api_pwd: "changeme"
kube_network_plugin: calico # kube_users:
# kube:
# pass: "{{kube_api_pwd}}"
# role: admin
# root:
# pass: "{{kube_api_pwd}}"
# role: admin
# Kubernetes internal network for services, unused block of space. # # Choose network plugin (calico, weave or flannel)
kube_service_addresses: 10.233.0.0/18 # # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
# kube_network_plugin: calico
# internal network. When used, it will assign IP # # Kubernetes internal network for services, unused block of space.
# addresses from this range to individual pods. # kube_service_addresses: 10.233.0.0/18
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated # # internal network. When used, it will assign IP
# to each node on your network. With these defaults you should have # # addresses from this range to individual pods.
# room for 4096 nodes with 254 pods per node. # # This network must be unused in your network infrastructure!
kube_network_node_prefix: 24 # kube_pods_subnet: 10.233.64.0/18
# The port the API Server will be listening on. # # internal network node size allocation (optional). This is the size allocated
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" # # to each node on your network. With these defaults you should have
kube_apiserver_port: 443 # (https) # # room for 4096 nodes with 254 pods per node.
kube_apiserver_insecure_port: 8080 # (http) # kube_network_node_prefix: 24
# DNS configuration. # # The port the API Server will be listening on.
# Kubernetes cluster name, also will be used as DNS domain # kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
cluster_name: cluster.local # kube_apiserver_port: 443 # (https)
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods # kube_apiserver_insecure_port: 8080 # (http)
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Path used to store Docker data # # DNS configuration.
docker_daemon_graph: "/var/lib/docker" # # Kubernetes cluster name, also will be used as DNS domain
# cluster_name: cluster.local
# # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
# ndots: 2
# # Can be dnsmasq_kubedns, kubedns or none
# dns_mode: dnsmasq_kubedns
# # Can be docker_dns, host_resolvconf or none
# resolvconf_mode: docker_dns
# # Deploy netchecker app to verify DNS resolve as an HTTP service
# deploy_netchecker: false
# # Ip address of the kubernetes skydns service
# skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
# dns_domain: "{{ cluster_name }}"
## A string of extra options to pass to the docker daemon. # # Path used to store Docker data
## This string should be exactly as you wish it to appear. # docker_daemon_graph: "/var/lib/docker"
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
docker_bin_dir: "/usr/bin"
# Settings for containerized control plane (etcd/kubelet/secrets) # ## A string of extra options to pass to the docker daemon.
etcd_deployment_type: docker # ## This string should be exactly as you wish it to appear.
kubelet_deployment_type: docker # ## An obvious use case is allowing insecure-registry access
cert_management: script # ## to self hosted registries like so:
vault_deployment_type: docker # docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
# docker_bin_dir: "/usr/bin"
# K8s image pull policy (imagePullPolicy) # # Settings for containerized control plane (etcd/kubelet/secrets)
k8s_image_pull_policy: IfNotPresent # etcd_deployment_type: docker
# kubelet_deployment_type: docker
# cert_management: script
# vault_deployment_type: docker
# Monitoring apps for k8s # # K8s image pull policy (imagePullPolicy)
efk_enabled: false # k8s_image_pull_policy: IfNotPresent
# # Monitoring apps for k8s
# efk_enabled: false

View file

@ -1,9 +0,0 @@
# Kubernetes 1.5 added a new flag to the apiserver to disable anonymous auth. In previous versions, anonymous auth was
# not implemented. As the new flag defaults to true, we have to explicitly disable it. Change this line if you want the
# 1.5 default behavior. The flag is actually only added if the used kubernetes version is >= 1.5
kube_api_anonymous_auth: false
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
kube_version: v1.5.3

View file

@ -14,4 +14,5 @@
when: reset_confirmation != "yes" when: reset_confirmation != "yes"
roles: roles:
- { role: kargo-defaults}
- { role: reset, tags: reset } - { role: reset, tags: reset }

View file

@ -0,0 +1,114 @@
## Required for bootstrap-os/preinstall/download roles and setting facts
# Valid bootstrap options (required): ubuntu, coreos, centos, none
bootstrap_os: none
kube_api_anonymous_auth: false
## Change this to use another Kubernetes version, e.g. a current beta release
kube_version: v1.5.3
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
docker_bin_dir: /usr/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"
# Random shifts for retrying failed ops like pushing/downloading
retry_stagger: 5
# DNS configuration.
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
# Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
ndots: 2
# Can be dnsmasq_kubedns, kubedns or none
dns_mode: dnsmasq_kubedns
# Can be docker_dns, host_resolvconf or none
resolvconf_mode: docker_dns
# Deploy netchecker app to verify DNS resolve as an HTTP service
deploy_netchecker: false
# Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
dns_domain: "{{ cluster_name }}"
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location and namespace.
# Editing those values will almost surely break something.
kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
system_namespace: kube-system
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Cluster Loglevel configuration
kube_log_level: 2
# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
kube:
pass: "{{kube_api_pwd}}"
role: admin
root:
pass: "{{kube_api_pwd}}"
role: admin
# Choose network plugin (calico, weave or flannel)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24
# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)
# Path used to store Docker data
docker_daemon_graph: "/var/lib/docker"
## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }} --graph={{ docker_daemon_graph }} --iptables=false"
# Settings for containerized control plane (etcd/kubelet/secrets)
etcd_deployment_type: docker
kubelet_deployment_type: docker
cert_management: script
vault_deployment_type: docker
# K8s image pull policy (imagePullPolicy)
k8s_image_pull_policy: IfNotPresent
efk_enabled: false

View file

@ -0,0 +1,5 @@
- name: Configure defaults
debug:
msg: "Check roles/kargo-defaults/defaults/main.yml"
tags:
- always

View file

@ -34,6 +34,7 @@
register: running_pods register: running_pods
- set_fact: - set_fact:
kube_pods_subnet: 10.233.64.0/18
pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}" pod_names: "{{ (pods.stdout | from_json)['items'] | map(attribute = 'metadata.name') | list }}"
pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}" pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
pods_hostnet: | pods_hostnet: |

View file

@ -2,8 +2,8 @@
- hosts: localhost - hosts: localhost
gather_facts: False gather_facts: False
roles: roles:
- bastion-ssh-config - { role: kargo-defaults}
tags: [localhost, bastion] - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
@ -13,9 +13,8 @@
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled. # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
ansible_ssh_pipelining: false ansible_ssh_pipelining: false
roles: roles:
- bootstrap-os - { role: kargo-defaults}
tags: - { role: bootstrap-os, tags: bootstrap-os}
- bootstrap-os
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
@ -26,6 +25,7 @@
- hosts: k8s-cluster:etcd:calico-rr - hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade } - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
- { role: kubernetes/preinstall, tags: preinstall } - { role: kubernetes/preinstall, tags: preinstall }
- { role: docker, tags: docker } - { role: docker, tags: docker }
@ -36,21 +36,25 @@
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" } - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
- hosts: etcd:!k8s-cluster - hosts: etcd:!k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: etcd, tags: etcd } - { role: etcd, tags: etcd }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: etcd, tags: etcd } - { role: etcd, tags: etcd }
- hosts: etcd:k8s-cluster:vault - hosts: etcd:k8s-cluster:vault
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults, when: "cert_management == 'vault'" }
- { role: vault, tags: vault, when: "cert_management == 'vault'"} - { role: vault, tags: vault, when: "cert_management == 'vault'"}
#Handle upgrades to master components first to maintain backwards compat. #Handle upgrades to master components first to maintain backwards compat.
@ -58,6 +62,7 @@
any_errors_fatal: true any_errors_fatal: true
serial: 1 serial: 1
roles: roles:
- { role: kargo-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: kubernetes/master, tags: master } - { role: kubernetes/master, tags: master }
@ -69,6 +74,7 @@
any_errors_fatal: true any_errors_fatal: true
serial: "{{ serial | default('20%') }}" serial: "{{ serial | default('20%') }}"
roles: roles:
- { role: kargo-defaults}
- { role: upgrade/pre-upgrade, tags: pre-upgrade } - { role: upgrade/pre-upgrade, tags: pre-upgrade }
- { role: kubernetes/node, tags: node } - { role: kubernetes/node, tags: node }
- { role: network_plugin, tags: network } - { role: network_plugin, tags: network }
@ -78,15 +84,18 @@
- hosts: calico-rr - hosts: calico-rr
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: network_plugin/calico/rr, tags: network } - { role: network_plugin/calico/rr, tags: network }
- hosts: k8s-cluster - hosts: k8s-cluster
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq } - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf } - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
- hosts: kube-master[0] - hosts: kube-master[0]
any_errors_fatal: true any_errors_fatal: true
roles: roles:
- { role: kargo-defaults}
- { role: kubernetes-apps, tags: apps } - { role: kubernetes-apps, tags: apps }