Merge branch 'master' into master

commit 16961f69f2
141 changed files with 654 additions and 262 deletions

Vagrantfile (vendored, 10 lines changed)
@@ -3,7 +3,7 @@

 require 'fileutils'

-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"

 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")

@@ -135,12 +135,6 @@ Vagrant.configure("2") do |config|
     config.vm.network :private_network, ip: ip

-    # workaround for Vagrant 1.9.1 and centos vm
-    # https://github.com/hashicorp/vagrant/issues/8096
-    if Vagrant::VERSION == "1.9.1" && $os == "centos"
-      config.vm.provision "shell", inline: "service network restart", run: "always"
-    end
-
     # Disable swap for each vm
     config.vm.provision "shell", inline: "swapoff -a"

@@ -164,7 +158,7 @@ Vagrant.configure("2") do |config|
       if File.exist?(File.join(File.dirname($inventory), "hosts"))
         ansible.inventory_path = $inventory
       end
-      ansible.sudo = true
+      ansible.become = true
      ansible.limit = "all"
      ansible.host_key_checking = false
      ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
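Two things change here: the minimum Vagrant version moves from 1.9.0 to 2.0.0 (which also lets the Vagrant 1.9.1 CentOS networking workaround go away), and the provisioner option `ansible.sudo` becomes `ansible.become`, following Ansible's rename of `sudo` to `become`. A minimal playbook-level sketch of the same rename (task contents hypothetical, not part of this diff):

```yaml
- hosts: all
  become: true          # replaces the deprecated `sudo: yes`
  tasks:
    - name: confirm privilege escalation works
      command: whoami
      register: who
    - debug:
        var: who.stdout   # expected: root
```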
@@ -13,3 +13,4 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
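The new `jinja2_extensions = jinja2.ext.do` line enables Jinja2's `{% do %}` statement tag, which lets templates call methods for their side effects (for example, mutating a list) without printing anything. A minimal sketch of what it permits (variable names hypothetical):

```yaml
- name: build a list in place with the do tag
  set_fact:
    upper_items: >-
      {%- set acc = [] -%}
      {%- for i in ['a', 'b', 'c'] -%}
      {%- do acc.append(i | upper) -%}
      {%- endfor -%}
      {{ acc }}
# upper_items -> ['A', 'B', 'C']
```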
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
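The reworked bastion arguments add `-o UserKnownHostsFile=/dev/null` so a reprovisioned bastion's changed host key never blocks the proxy hop, and pass the inventory's private key to that hop when `ansible_ssh_private_key_file` is set. Rendered with hypothetical values, the variable would come out as:

```yaml
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q admin@203.0.113.10 -i ~/.ssh/id_rsa'"
```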
@@ -2,6 +2,6 @@ output "router_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }

-output "network_id" {
+output "subnet_id" {
   value = "${openstack_networking_subnet_v2.k8s.id}"
 }
@@ -1,7 +1,7 @@
 Vagrant Install
 =================

-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
 with vmware, but is untested) you should be able to launch a 3 node
 Kubernetes cluster by simply running `$ vagrant up`.<br />
@@ -118,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *kubelet_cgroup_driver* - Allows manual override of the
   cgroup-driver option for Kubelet. By default autodetection is used
   to match Docker configuration.
+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```

 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
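To complement the dict example above, a group-wide placement sketch (path and label values hypothetical):

```yaml
# inventory/group_vars/kube-node.yml
node_labels:
  environment: production
  disk: ssd
```

Kubelet then applies these to every node in the group at registration time via its `--node-labels` flag.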
@@ -6,7 +6,6 @@
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system

 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"
@@ -214,6 +213,10 @@ ingress_nginx_enabled: false
 # ingress_nginx_configmap_udp_services:
 #   53: "kube-system/kube-dns:53"

+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false
@@ -91,7 +91,7 @@
 - name: Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 subjects:
   - kind: ServiceAccount
     name: dnsmasq
-    namespace: "{{ system_namespace}}"
+    namespace: "kube-system"
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: dnsmasq
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     k8s-app: dnsmasq
     kubernetes.io/cluster-service: "true"
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     kubernetes.io/cluster-service: "true"
@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
@@ -21,6 +21,10 @@ docker_dns_servers_strict: yes

 docker_container_storage_setup: false

+# Used to override obsoletes=0
+yum_conf: /etc/yum.conf
+docker_yum_conf: /etc/yum_docker.conf
+
 # CentOS/RedHat docker-ce repo
 docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
 docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
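These two variables support the obsoletes workaround added further below: the role copies `/etc/yum.conf` to `/etc/yum_docker.conf`, forces `obsoletes=0` in the copy, and installs the pinned Docker packages against that config, so yum will not swap them for a newer package that obsoletes them. The yum module's `conf_file` parameter ties it together; a sketch (package version hypothetical):

```yaml
- name: install a pinned docker-ce using the edited yum config
  yum:
    name: docker-ce-17.03.2.ce-1.el7.centos
    conf_file: /etc/yum_docker.conf
    state: present
```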
@@ -30,6 +30,8 @@
   tags:
     - facts

+- import_tasks: pre-upgrade.yml
+
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:

@@ -78,11 +80,27 @@
     dest: "/etc/yum.repos.d/docker.repo"
   when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

+- name: Copy yum.conf for editing
+  copy:
+    src: "{{ yum_conf }}"
+    dest: "{{ docker_yum_conf }}"
+    remote_src: yes
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+  lineinfile:
+    path: "{{ docker_yum_conf }}"
+    state: present
+    regexp: '^obsoletes='
+    line: 'obsoletes=0'
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
+    conf_file: "{{item.yum_conf|default(omit)}}"
     state: present
   register: docker_task_result
   until: docker_task_result|succeeded
roles/docker/tasks/pre-upgrade.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
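Both tasks are gated on the target package actually being a `docker-ce` build: `docker_versioned_pkg[docker_version | string]` resolves the requested version to a distro package name, and `search('docker-ce')` only matches when that name is a docker-ce package. A sketch of the lookup (map entries assumed, not part of this diff):

```yaml
docker_versioned_pkg:
  'latest': docker-ce
  '17.03': docker-ce-17.03.2.ce-1.el7.centos
# with docker_version: "17.03", the when: clause evaluates
# docker_versioned_pkg['17.03'] | search('docker-ce')  ->  true
```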
@@ -28,7 +28,9 @@ docker_package_info:
   pkg_mgr: yum
   pkgs:
     - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"

 docker_repo_key_info:
   pkg_key: ''
@@ -70,8 +70,8 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"
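The hyperkube image moves from the CoreOS build on quay.io, whose tags carry a `_coreos.0` suffix, to the upstream Google registry, whose tags are the bare Kubernetes version. With a hypothetical `kube_version` of v1.9.5 the resolved references would be:

```yaml
# before: quay.io/coreos/hyperkube:v1.9.5_coreos.0
# after:  gcr.io/google-containers/hyperkube:v1.9.5
```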
@@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"

 helm_version: "v2.8.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"
@@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
 tiller_image_tag: "{{ helm_version }}"
 vault_image_repo: "vault"
 vault_image_tag: "{{ vault_version }}"
+cert_manager_version: "v0.2.3"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
+cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
+cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"

 downloads:
   netcheck_server:
@@ -140,18 +144,24 @@ downloads:
     repo: "{{ netcheck_server_img_repo }}"
     tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
     tag: "{{ netcheck_agent_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   etcd:
     enabled: true
     container: true
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
     sha256: "{{ etcd_digest_checksum|default(None) }}"
+    groups:
+      - etcd
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true
@@ -163,6 +173,8 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - k8s-cluster
   istioctl:
     enabled: "{{ istio_enabled }}"
     file: true
@@ -174,140 +186,186 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - kube-master
   hyperkube:
     enabled: true
     container: true
     repo: "{{ hyperkube_image_repo }}"
     tag: "{{ hyperkube_image_tag }}"
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true
     repo: "{{ cilium_image_repo }}"
     tag: "{{ cilium_image_tag }}"
     sha256: "{{ cilium_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ flannel_image_repo }}"
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel_cni:
     enabled: "{{ kube_network_plugin == 'flannel' }}"
     container: true
     repo: "{{ flannel_cni_image_repo }}"
     tag: "{{ flannel_cni_image_tag }}"
     sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calicoctl_image_repo }}"
     tag: "{{ calicoctl_image_tag }}"
     sha256: "{{ calicoctl_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
     sha256: "{{ calico_node_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_cni:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_cni_image_repo }}"
     tag: "{{ calico_cni_image_tag }}"
     sha256: "{{ calico_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_policy:
     enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_policy_image_repo }}"
     tag: "{{ calico_policy_image_tag }}"
     sha256: "{{ calico_policy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_rr:
     enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
     container: true
     repo: "{{ calico_rr_image_repo }}"
     tag: "{{ calico_rr_image_tag }}"
     sha256: "{{ calico_rr_digest_checksum|default(None) }}"
+    groups:
+      - calico-rr
   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_kube_image_repo }}"
     tag: "{{ weave_kube_image_tag }}"
     sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   weave_npc:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_npc_image_repo }}"
     tag: "{{ weave_npc_image_tag }}"
     sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_image_repo }}"
     tag: "{{ contiv_image_tag }}"
     sha256: "{{ contiv_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv_auth_proxy:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_auth_proxy_image_repo }}"
     tag: "{{ contiv_auth_proxy_image_tag }}"
     sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   pod_infra:
     enabled: true
     container: true
     repo: "{{ pod_infra_image_repo }}"
     tag: "{{ pod_infra_image_tag }}"
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   install_socat:
     enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
     container: true
     repo: "{{ install_socat_image_repo }}"
     tag: "{{ install_socat_image_tag }}"
     sha256: "{{ install_socat_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   nginx:
-    enabled: true
+    enabled: "{{ loadbalancer_apiserver_localhost }}"
     container: true
     repo: "{{ nginx_image_repo }}"
     tag: "{{ nginx_image_tag }}"
     sha256: "{{ nginx_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq:
     enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
     container: true
     repo: "{{ dnsmasq_image_repo }}"
     tag: "{{ dnsmasq_image_tag }}"
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubedns:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubedns_image_repo }}"
     tag: "{{ kubedns_image_tag }}"
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
     container: true
     repo: "{{ coredns_image_repo }}"
     tag: "{{ coredns_image_tag }}"
     sha256: "{{ coredns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_nanny:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_nanny_image_repo }}"
     tag: "{{ dnsmasq_nanny_image_tag }}"
     sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_sidecar:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_sidecar_image_repo }}"
     tag: "{{ dnsmasq_sidecar_image_tag }}"
     sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubednsautoscaler:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubednsautoscaler_image_repo }}"
     tag: "{{ kubednsautoscaler_image_tag }}"
     sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   testbox:
-    enabled: true
+    enabled: false
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
@@ -318,30 +376,40 @@ downloads:
     repo: "{{ elasticsearch_image_repo }}"
     tag: "{{ elasticsearch_image_tag }}"
     sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   fluentd:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ fluentd_image_repo }}"
     tag: "{{ fluentd_image_tag }}"
     sha256: "{{ fluentd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kibana:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ kibana_image_repo }}"
     tag: "{{ kibana_image_tag }}"
     sha256: "{{ kibana_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   helm:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ helm_image_repo }}"
     tag: "{{ helm_image_tag }}"
     sha256: "{{ helm_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   tiller:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ tiller_image_repo }}"
     tag: "{{ tiller_image_tag }}"
     sha256: "{{ tiller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   vault:
     enabled: "{{ cert_management == 'vault' }}"
     container: "{{ vault_deployment_type != 'host' }}"
@@ -356,6 +424,24 @@ downloads:
     unarchive: true
     url: "{{ vault_download_url }}"
     version: "{{ vault_version }}"
+    groups:
+      - vault
+  cert_manager_controller:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_controller_image_repo }}"
+    tag: "{{ cert_manager_controller_image_tag }}"
+    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  cert_manager_ingress_shim:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_ingress_shim_image_repo }}"
+    tag: "{{ cert_manager_ingress_shim_image_tag }}"
+    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
+    groups:
+      - kube-node

 download_defaults:
   container: false
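Both new downloads are gated on `cert_manager_enabled`, which defaults to false in the vars hunk above; flipping it in inventory vars is enough to pull the images onto kube-node hosts:

```yaml
# group_vars sketch
cert_manager_enabled: true
# cert_manager_namespace: "cert-manager"   # optional override from this diff
```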
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -23,6 +24,7 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes

@@ -38,3 +40,4 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
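This repeated new condition is what makes the per-download `groups` lists effective: `group_names` holds the current host's inventory groups, `intersect` keeps the members common to both lists, and a non-zero `length` is truthy, so each download only runs on hosts that belong to at least one of its target groups. A sketch:

```yaml
# for a host in groups ['kube-node', 'k8s-cluster']:
- debug:
    msg: "{{ group_names | intersect(['etcd']) | length }}"         # 0 -> condition false, skipped
- debug:
    msg: "{{ group_names | intersect(['k8s-cluster']) | length }}"  # 1 -> condition true, runs
```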
@@ -13,6 +13,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

 - name: file_download | Download item
   get_url:

@@ -28,6 +29,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length

 - name: file_download | Extract archives
   unarchive:

@@ -40,3 +42,4 @@
     - download.enabled
     - download.file
     - download.unarchive|default(False)
+    - group_names | intersect(download.groups) | length
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -17,6 +18,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -27,6 +29,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length

 - name: "container_download | Update the 'container_changed' fact"
   set_fact:

@@ -36,6 +39,7 @@
     - download.container
     - download_run_once
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts

@@ -53,6 +57,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts

@@ -68,6 +73,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
+    - group_names | intersect(download.groups) | length

 - name: container_download | copy container images to ansible host
   synchronize:

@@ -87,6 +93,7 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
+    - group_names | intersect(download.groups) | length

 - name: container_download | upload container images to nodes
   synchronize:

@@ -108,6 +115,7 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or
       download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade

@@ -120,6 +128,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
       inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -12,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"

 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"

@@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"

-#etcd_snapshot_count: "10000"
+# etcd_snapshot_count: "10000"

 # Parameters for ionice
 # -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
 # -n takes an integer between 0 (highest priority) and 7 (lowest priority)
-#etcd_ionice: "-c2 -n0"
+# etcd_ionice: "-c2 -n0"

 etcd_metrics: "basic"
@@ -48,7 +48,7 @@
     snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
     ETCDCTL_API: 3
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
   retries: 3
   delay: "{{ retry_stagger | random + 3 }}"
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"

@@ -22,8 +22,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Copy etcd.service systemd file
   template:
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -29,13 +29,13 @@
   tags:
     - upgrade

-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup

 - name: Restart etcd if certs changed

@@ -68,8 +68,8 @@
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup

-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"

 - name: Configure | Check if etcd-events cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"

@@ -22,5 +22,5 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name

@@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer
 [alt_names]
 DNS.1 = localhost
 {% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
-{% set idx = groups['etcd'] | length | int + 3 %}
 {% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% endfor %}
 {% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx = groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
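Plain Jinja2 cannot carry a counter across separate loops (loop variables are scoped per loop, and `{% set %}` inside a loop does not leak out), so the template now keeps the DNS/IP indices in a mutable dict and bumps them through the `increment` macro; `dict.update()` returns None, so the `{% if dct.update(...) %}` wrapper mutates the dict while printing nothing. This also closes the index gaps the old arithmetic left when optional entries (the apiserver load-balancer name, a host's `access_ip`) were absent. A rendered sketch with one etcd host and hypothetical values:

```
[alt_names]
DNS.1 = localhost
DNS.2 = etcd1
DNS.3 = etcd.kube-system.svc.cluster.local
DNS.4 = etcd.kube-system.svc
DNS.5 = etcd.kube-system
DNS.6 = etcd
IP.1 = 192.0.2.10
IP.2 = 127.0.0.1
```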
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Delete old CoreDNS resources
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent

@@ -16,7 +16,7 @@
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: absent

@@ -28,7 +28,7 @@
 - name: Kubernetes Apps | Delete old KubeDNS resources
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent

@@ -41,7 +41,7 @@
 - name: Kubernetes Apps | Delete kubeadm KubeDNS
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"

@@ -37,7 +37,7 @@
 - name: Kubernetes Apps | Start Resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"

@@ -50,6 +50,10 @@
     - dns_mode != 'none'
     - inventory_hostname == groups['kube-master'][0]
     - not item|skipped
+  register: resource_result
+  until: resource_result|succeeded
+  retries: 4
+  delay: 5
   tags:
     - dnsmasq
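The added `register`/`until` block retries resource creation instead of failing on a transient apiserver hiccup: the task re-runs until `resource_result|succeeded` is true, up to 4 retries, 5 seconds apart. The same pattern in isolation (task body hypothetical):

```yaml
- name: retry a flaky action until it succeeds
  command: /bin/true
  register: result
  until: result|succeeded
  retries: 4
  delay: 5
```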
@@ -15,4 +15,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 data:
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
@ -25,7 +25,7 @@ metadata:
|
||||||
labels:
|
labels:
|
||||||
k8s-app: kubernetes-dashboard
|
k8s-app: kubernetes-dashboard
|
||||||
name: kubernetes-dashboard-certs
|
name: kubernetes-dashboard-certs
|
||||||
namespace: {{ system_namespace }}
|
namespace: kube-system
|
||||||
type: Opaque
|
type: Opaque
|
||||||
|
|
||||||
---
|
---
|
||||||
|
@ -37,7 +37,7 @@ metadata:
|
||||||
labels:
|
labels:
|
||||||
k8s-app: kubernetes-dashboard
|
k8s-app: kubernetes-dashboard
|
||||||
name: kubernetes-dashboard
|
name: kubernetes-dashboard
|
||||||
namespace: {{ system_namespace }}
|
namespace: kube-system
|
||||||
|
|
||||||
---
|
---
|
||||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||||
|
@ -46,7 +46,7 @@ kind: Role
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
metadata:
|
metadata:
|
||||||
name: kubernetes-dashboard-minimal
|
name: kubernetes-dashboard-minimal
|
||||||
namespace: {{ system_namespace }}
|
namespace: kube-system
|
||||||
rules:
|
rules:
|
||||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||||
- apiGroups: [""]
|
- apiGroups: [""]
|
||||||
|
@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: RoleBinding
|
kind: RoleBinding
|
||||||
metadata:
|
metadata:
|
||||||
name: kubernetes-dashboard-minimal
|
name: kubernetes-dashboard-minimal
|
||||||
namespace: {{ system_namespace }}
|
namespace: kube-system
|
||||||
roleRef:
|
roleRef:
|
||||||
apiGroup: rbac.authorization.k8s.io
|
apiGroup: rbac.authorization.k8s.io
|
||||||
kind: Role
|
kind: Role
|
||||||
|
@ -89,7 +89,7 @@ roleRef:
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: kubernetes-dashboard
|
name: kubernetes-dashboard
|
||||||
namespace: {{ system_namespace }}
|
namespace: kube-system
|
||||||
|
|
||||||
---
|
---
|
||||||
# ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
|
# ------------------- Gross Hack For anonymous auth through api proxy ------------------- #
|
||||||
|
@ -103,7 +103,7 @@ rules:
|
||||||
resources: ["services/proxy"]
|
resources: ["services/proxy"]
|
||||||
resourceNames: ["https:kubernetes-dashboard:"]
|
resourceNames: ["https:kubernetes-dashboard:"]
|
||||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||||
- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
|
- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
|
||||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||||
|
|
||||||
---
|
---
|
||||||
|
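For context, the nonResourceURLs rule above is what permits reaching the dashboard through the apiserver proxy path. A hypothetical probe of that path (kubectl's real `get --raw` subcommand; the task itself is not part of this change):

- name: Probe dashboard via apiserver proxy
  command: >-
    {{ bin_dir }}/kubectl get --raw
    /api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/
  changed_when: false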
@@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10

@@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
   - port: 443

@@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]

@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler

@@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system

@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"

@@ -40,7 +40,7 @@ spec:
             memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace={{ system_namespace }}
+          - --namespace=kube-system
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
          - --target=Deployment/kube-dns
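The --namespace/--configmap flags above point cluster-proportional-autoscaler at a ConfigMap holding its scaling parameters. A sketch in the upstream-documented `linear` format (the numbers are illustrative, not Kubespray defaults):

apiVersion: v1
kind: ConfigMap
metadata:
  name: kubedns-autoscaler
  namespace: kube-system
data:
  linear: |-
    {"coresPerReplica": 256, "nodesPerReplica": 16, "min": 1}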
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"

@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"

@@ -126,32 +126,3 @@
     - kube_version | version_compare('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
-
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps

@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"

@@ -10,7 +10,7 @@
   when: rbac_enabled

 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"

@@ -24,7 +24,7 @@
   register: es_deployment_manifest

 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed

@@ -35,6 +35,6 @@
   register: es_service_manifest

 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed

@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin

@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"

@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"

@@ -17,6 +17,6 @@
   register: fluentd_ds_manifest

 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used

@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"

@@ -10,7 +10,7 @@
     filename: "{{kube_config_dir}}/kibana-deployment.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "deployment"
     state: "latest"
   with_items: "{{ kibana_deployment_manifest.changed }}"

@@ -27,7 +27,7 @@
     filename: "{{kube_config_dir}}/kibana-service.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "svc"
     state: "latest"
   with_items: "{{ kibana_service_manifest.changed }}"

@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"

@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"

@@ -2,7 +2,7 @@
 cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner
 cephfs_provisioner_image_tag: 92295a30

-cephfs_provisioner_namespace: "{{ system_namespace }}"
+cephfs_provisioner_namespace: "kube-system"
 cephfs_provisioner_cluster: ceph
 cephfs_provisioner_monitors: []
 cephfs_provisioner_admin_id: admin

@@ -2,7 +2,7 @@
 local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
 local_volume_provisioner_image_tag: v2.0.0

-local_volume_provisioner_namespace: "{{ system_namespace }}"
+local_volume_provisioner_namespace: "kube-system"
 local_volume_provisioner_base_dir: /mnt/disks
 local_volume_provisioner_mount_dir: /mnt/disks
 local_volume_provisioner_storage_class: local-storage
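The `local-storage` class named above is not created by the provisioner itself. A sketch of the StorageClass it is normally paired with, per the upstream local-volume documentation (not taken from this diff):

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer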
@@ -18,7 +18,7 @@
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -28,7 +28,7 @@

 - name: Helm | Install/upgrade helm
   command: >
-    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
+    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
     {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
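For illustration, with hypothetical values substituted for the image variables and rbac_enabled true, the templated command above expands to a single invocation along these lines:

- name: Example rendered helm init (hypothetical variable values)
  command: >-
    /usr/local/bin/helm init --upgrade
    --tiller-image=gcr.io/kubernetes-helm/tiller:v2.8.1
    --tiller-namespace=kube-system
    --service-account=tiller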
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: tiller
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin

@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"

@@ -0,0 +1,17 @@
+Deployment files
+================
+
+This directory contains example deployment manifests for cert-manager that can
+be used in place of the official Helm chart.
+
+This is useful if you are deploying cert-manager into an environment without
+Helm, or want to inspect a 'bare minimum' deployment.
+
+Where do these come from?
+-------------------------
+
+The manifests in these subdirectories are generated from the Helm chart
+automatically. The `values.yaml` files used to configure cert-manager can be
+found in [`hack/deploy`](../../hack/deploy/).
+
+They are automatically generated by running `./hack/update-deploy-gen.sh`.
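A sketch of applying those generated manifests directly, as the README describes (the directory path is hypothetical):

- name: Apply cert-manager manifests without Helm
  command: "kubectl apply -f contrib/cert-manager/manifests/"  # hypothetical path
  changed_when: false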
@@ -0,0 +1,6 @@
+---
+cert_manager_namespace: "cert-manager"
+cert_manager_cpu_requests: 10m
+cert_manager_cpu_limits: 30m
+cert_manager_memory_requests: 32Mi
+cert_manager_memory_limits: 200Mi

@@ -0,0 +1,38 @@
+---
+
+- name: Cert Manager | Create addon dir
+  file:
+    path: "{{ kube_config_dir }}/addons/cert_manager"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Cert Manager | Create manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
+  with_items:
+    - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
+    - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
+    - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
+    - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
+    - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
+    - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
+    - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
+  register: cert_manager_manifests
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Cert Manager | Apply manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ cert_manager_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ cert_manager_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]

@@ -0,0 +1,21 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: certificates.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  scope: Namespaced
+  names:
+    kind: Certificate
+    plural: certificates
+    shortNames:
+      - cert
+      - certs
+

@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterissuers.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  names:
+    kind: ClusterIssuer
+    plural: clusterissuers
+  scope: Cluster

@@ -0,0 +1,25 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+rules:
+  - apiGroups: ["certmanager.k8s.io"]
+    resources: ["certificates", "issuers", "clusterissuers"]
+    verbs: ["*"]
+  - apiGroups: [""]
+    # TODO: remove endpoints once 0.4 is released. We include it here in case
+    # users use the 'master' version of the Helm chart with a 0.2.x release of
+    # cert-manager that still performs leader election with Endpoint resources.
+    # We advise users don't do this, but some will anyway and this will reduce
+    # friction.
+    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+    verbs: ["*"]
+  - apiGroups: ["extensions"]
+    resources: ["ingresses"]
+    verbs: ["*"]

@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cert-manager
+subjects:
+  - name: cert-manager
+    namespace: {{ cert_manager_namespace }}
+    kind: ServiceAccount

@@ -0,0 +1,51 @@
+---
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: cert-manager
+  namespace: {{ cert_manager_namespace }}
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: cert-manager
+        release: cert-manager
+      annotations:
+    spec:
+      serviceAccountName: cert-manager
+      containers:
+        - name: cert-manager
+          image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          args:
+            - --cluster-resource-namespace=$(POD_NAMESPACE)
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          resources:
+            requests:
+              cpu: {{ cert_manager_cpu_requests }}
+              memory: {{ cert_manager_memory_requests }}
+            limits:
+              cpu: {{ cert_manager_cpu_limits }}
+              memory: {{ cert_manager_memory_limits }}
+
+        - name: ingress-shim
+          image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
+          imagePullPolicy: {{ k8s_image_pull_policy }}
+          resources:
+            requests:
+              cpu: {{ cert_manager_cpu_requests }}
+              memory: {{ cert_manager_memory_requests }}
+            limits:
+              cpu: {{ cert_manager_cpu_limits }}
+              memory: {{ cert_manager_memory_limits }}
+

@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: issuers.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  names:
+    kind: Issuer
+    plural: issuers
+  scope: Namespaced

@@ -0,0 +1,7 @@
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: {{ cert_manager_namespace }}
+  labels:
+    name: {{ cert_manager_namespace }}

@@ -0,0 +1,11 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: cert-manager
+  namespace: {{ cert_manager_namespace }}
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
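Once the CRDs above are registered, Issuer objects can be created against the new certmanager.k8s.io/v1alpha1 API. A sketch approximated from the cert-manager 0.2.x-era docs (every value here is a placeholder, not part of this change):

apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
  name: letsencrypt-staging  # placeholder
  namespace: cert-manager
spec:
  acme:
    server: https://acme-staging.api.letsencrypt.org/directory
    email: user@example.com  # placeholder
    privateKeySecretRef:
      name: letsencrypt-staging-key  # Secret cert-manager will create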
@@ -20,6 +20,9 @@ spec:
       labels:
         k8s-app: ingress-nginx
         version: v{{ ingress_nginx_controller_image_tag }}
+      annotations:
+        prometheus.io/port: '10254'
+        prometheus.io/scrape: 'true'
     spec:
 {% if ingress_nginx_host_network %}
       hostNetwork: true

@@ -78,3 +81,4 @@ spec:
 {% if rbac_enabled %}
       serviceAccountName: ingress-nginx
 {% endif %}
+
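The prometheus.io/scrape and prometheus.io/port annotations added above follow the common Prometheus pod-discovery convention. A sketch of the scrape-job relabeling they assume (standard kubernetes_sd pattern, not shipped by Kubespray):

- job_name: kubernetes-pods
  kubernetes_sd_configs:
    - role: pod
  relabel_configs:
    # keep only pods that opt in via the annotation
    - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
      action: keep
      regex: "true"
    # rewrite the scrape address to the annotated port
    - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
      action: replace
      regex: ([^:]+)(?::\d+)?;(\d+)
      replacement: $1:$2
      target_label: __address__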
@@ -6,3 +6,10 @@ dependencies:
       - apps
       - ingress-nginx
       - ingress-controller
+
+  - role: kubernetes-apps/ingress_controller/cert_manager
+    when: cert_manager_enabled
+    tags:
+      - apps
+      - cert-manager
+      - ingress-controller

@@ -2,7 +2,7 @@
 - name: Start Calico resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -2,7 +2,7 @@
 - name: Canal | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -2,7 +2,7 @@
 - name: Cilium | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -11,7 +11,7 @@
   when: inventory_hostname == groups['kube-master'][0] and not item|skipped

 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
+  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30
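The jsonpath above selects the names of cilium pods whose first container reports ready==false, so the loop exits once no name containing "cilium" comes back. The same expression as a standalone debugging task (a sketch, not part of the change):

- name: List not-ready cilium pods
  command: >-
    {{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium
    -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'
  register: not_ready
  changed_when: false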
@@ -3,7 +3,7 @@
 - name: Contiv | Create Kubernetes resources
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"

@@ -2,7 +2,7 @@
 - name: Flannel | Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -5,7 +5,7 @@
     kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/weave-net.yml"
     resource: "ds"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     state: "latest"
   when: inventory_hostname == groups['kube-master'][0]

@@ -12,7 +12,7 @@
     name: calico-policy-controller
     kubectl: "{{bin_dir}}/kubectl"
     resource: rs
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     state: absent
   run_once: true

@@ -32,7 +32,7 @@
 - name: Start of Calico kube controllers
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -2,7 +2,7 @@ apiVersion: apps/v1beta2
 kind: Deployment
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: calico-kube-controllers
     kubernetes.io/cluster-service: "true"

@@ -15,7 +15,7 @@ spec:
   template:
     metadata:
       name: calico-kube-controllers
-      namespace: {{ system_namespace }}
+      namespace: kube-system
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-kube-controllers

@@ -3,7 +3,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups:
     - ""

@@ -10,4 +10,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system

@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: calico-kube-controllers
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"

@@ -4,6 +4,6 @@ registry_image_tag: 2.6
 registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
 registry_proxy_image_tag: 0.4

-registry_namespace: "{{ system_namespace }}"
+registry_namespace: "kube-system"
 registry_storage_class: ""
 registry_disk_size: "10Gi"

@@ -44,5 +44,5 @@
   when: needs_rotation

 - name: Rotate Tokens | Delete pods in system namespace
-  command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all"
+  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
   when: needs_rotation

@@ -96,4 +96,5 @@ volume_cross_zone_attachment: false
 ## Encrypting Secret Data at Rest
 kube_encrypt_secret_data: false
 kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
-kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm
+# Must be either: aescbc, secretbox or aesgcm
+kube_encryption_algorithm: "aescbc"
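When kube_encrypt_secret_data is enabled, variables like these typically feed the apiserver's encryption provider config. A sketch in the Kubernetes 1.9-era format (the key is a placeholder, not a real secret):

kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key
              secret: c2VjcmV0LWtleS1wbGFjZWhvbGRlcg==  # placeholder; derive from kube_encrypt_token
      - identity: {}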
@@ -9,4 +9,6 @@
     - {src: apiserver-key.pem, dest: apiserver.key}
     - {src: ca.pem, dest: ca.crt}
     - {src: ca-key.pem, dest: ca.key}
+    - {src: service-account-key.pem, dest: sa.pub}
+    - {src: service-account-key.pem, dest: sa.key}
   register: kubeadm_copy_old_certs

@@ -30,4 +30,7 @@
   with_items:
     - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
   when: kube_apiserver_manifest_replaced.changed
-  run_once: true
+  register: remove_master_container
+  retries: 4
+  until: remove_master_container.rc == 0
+  delay: 5

@@ -90,3 +90,6 @@ apiServerCertSANs:
 {% endfor %}
 certificatesDir: {{ kube_config_dir }}/ssl
 unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
+{% if kube_override_hostname|default('') %}
+nodeName: {{ kube_override_hostname }}
+{% endif %}
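For illustration, with a hypothetical inventory value such as kube_override_hostname: node1.internal, the conditional above appends one extra line to the rendered kubeadm config:

nodeName: node1.internal  # hypothetical value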
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-apiserver
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-apiserver
     kubespray: v2

@@ -63,7 +63,7 @@ spec:
 {% if kube_token_auth|default(true) %}
     - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
 {% endif %}
-    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
 {% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
     - --oidc-issuer-url={{ kube_oidc_url }}
     - --oidc-client-id={{ kube_oidc_client_id }}

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-controller-manager
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-controller-manager
   annotations:

@@ -29,7 +29,7 @@ spec:
     - controller-manager
     - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
     - --leader-elect=true
-    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
+    - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
     - --root-ca-file={{ kube_cert_dir }}/ca.pem
     - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
     - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-scheduler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-scheduler
   annotations:

@@ -1,6 +0,0 @@
----
-namespace_kubesystem:
-  apiVersion: v1
-  kind: Namespace
-  metadata:
-    name: "{{system_namespace}}"

@@ -134,6 +134,19 @@
   tags:
     - kube-proxy

+- name: Write cloud-config
+  template:
+    src: "{{ cloud_provider }}-cloud-config.j2"
+    dest: "{{ kube_config_dir }}/cloud_config"
+    group: "{{ kube_cert_group }}"
+    mode: 0640
+  when:
+    - cloud_provider is defined
+    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  notify: restart kubelet
+  tags:
+    - cloud-provider
+
 # reload-systemd
 - meta: flush_handlers

@@ -81,18 +81,26 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}

 {# Kubelet node labels #}
+{% set role_node_labels = [] %}
 {% if inventory_hostname in groups['kube-master'] %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %}
+{% do role_node_labels.append('node-role.kubernetes.io/master=true') %}
 {% if not standalone_kubelet|bool %}
-{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %}
+{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {% endif %}
 {% elif inventory_hostname in groups['kube-ingress']|default([]) %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %}
+{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
 {% else %}
-{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
+{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
 {% endif %}
+{% set inventory_node_labels = [] %}
+{% if node_labels is defined %}
+{% for labelname, labelvalue in node_labels.iteritems() %}
+{% do inventory_node_labels.append(labelname + '=' + labelvalue) %}
+{% endfor %}
+{% endif %}
+{% set all_node_labels = role_node_labels + inventory_node_labels %}
+
-KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
+KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
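The rework above merges role-derived labels with an optional inventory-supplied node_labels dict. A sketch assuming a hypothetical group_vars entry:

node_labels:
  disktype: ssd
  zone: us-east-1a
# On a plain worker node the template above would then render:
#   --node-labels=node-role.kubernetes.io/node=true,disktype=ssd,zone=us-east-1a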
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-proxy
-  namespace: {{system_namespace}}
+  namespace: kube-system
   labels:
     k8s-app: kube-proxy
   annotations:

@@ -48,7 +48,6 @@ spec:
 {% elif kube_proxy_mode == 'ipvs' %}
     - --masquerade-all
     - --feature-gates=SupportIPVSProxyMode=true
-    - --proxy-mode=ipvs
     - --ipvs-min-sync-period=5s
     - --ipvs-sync-period=5s
     - --ipvs-scheduler=rr
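A hypothetical post-check that ipvs mode is actually in effect after this flag cleanup (requires ipvsadm on the node; not part of the change):

- name: List ipvs virtual servers
  command: ipvsadm -Ln
  register: ipvs_table
  changed_when: false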