Merge branch 'master' into master

commit 16961f69f2
141 changed files with 654 additions and 262 deletions
Vagrantfile (vendored): 10 changes
@@ -3,7 +3,7 @@
 require 'fileutils'
 
-Vagrant.require_version ">= 1.9.0"
+Vagrant.require_version ">= 2.0.0"
 
 CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
 

@@ -135,12 +135,6 @@ Vagrant.configure("2") do |config|
     config.vm.network :private_network, ip: ip
 
-      # workaround for Vagrant 1.9.1 and centos vm
-      # https://github.com/hashicorp/vagrant/issues/8096
-      if Vagrant::VERSION == "1.9.1" && $os == "centos"
-        config.vm.provision "shell", inline: "service network restart", run: "always"
-      end
-
     # Disable swap for each vm
     config.vm.provision "shell", inline: "swapoff -a"

@@ -164,7 +158,7 @@ Vagrant.configure("2") do |config|
       if File.exist?(File.join(File.dirname($inventory), "hosts"))
         ansible.inventory_path = $inventory
       end
-      ansible.sudo = true
+      ansible.become = true
       ansible.limit = "all"
       ansible.host_key_checking = false
       ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
@@ -13,3 +13,4 @@ callback_whitelist = profile_tasks
 roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
 deprecation_warnings=False
 inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
+jinja2_extensions = jinja2.ext.do
@@ -1 +1 @@
-ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
+ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
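The `USER@BASTION_ADDRESS` token in both versions is a placeholder to fill in per environment. A hedged sketch of the new line with concrete values (the user and address below are invented for illustration):

```
# Illustration only: "admin" and "203.0.113.10" are made-up placeholder values.
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q admin@203.0.113.10 {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
```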
@@ -2,6 +2,6 @@ output "router_id" {
   value = "${openstack_networking_router_interface_v2.k8s.id}"
 }
 
-output "network_id" {
+output "subnet_id" {
   value = "${openstack_networking_subnet_v2.k8s.id}"
 }
@@ -1,7 +1,7 @@
 Vagrant Install
 =================
 
-Assuming you have Vagrant (1.9+) installed with virtualbox (it may work
+Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
 with vmware, but is untested) you should be able to launch a 3 node
 Kubernetes cluster by simply running `$ vagrant up`.<br />
@@ -118,6 +118,14 @@ Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-st
 * *kubelet_cgroup_driver* - Allows manual override of the
   cgroup-driver option for Kubelet. By default autodetection is used
   to match Docker configuration.
+* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
+  For example, labels can be set in the inventory as variables or more widely in group_vars.
+  *node_labels* must be defined as a dict:
+```
+node_labels:
+  label1_name: label1_value
+  label2_name: label2_value
+```
 
 ##### Custom flags for Kube Components
 For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
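The hunk above cuts off before the documented example. As a hedged sketch only, such a flag list takes the shape below; the variable names `apiserver_custom_flags` and `kubelet_custom_flags` are assumptions for illustration, not taken from this diff:

```
# Assumed variable names; each list entry is passed verbatim to the component
# as an extra command-line flag.
apiserver_custom_flags:
  - "--runtime-config=admissionregistration.k8s.io/v1alpha1"

kubelet_custom_flags:
  - "--eviction-hard=memory.available<100Mi"
```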
@@ -6,7 +6,6 @@
 kube_config_dir: /etc/kubernetes
 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-system_namespace: kube-system
 
 # This is where all the cert scripts and certs will be located
 kube_cert_dir: "{{ kube_config_dir }}/ssl"

@@ -214,6 +213,10 @@ ingress_nginx_enabled: false
 # ingress_nginx_configmap_udp_services:
 #   53: "kube-system/kube-dns:53"
 
+# Cert manager deployment
+cert_manager_enabled: false
+# cert_manager_namespace: "cert-manager"
+
 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
 persistent_volumes_enabled: false
@@ -91,7 +91,7 @@
 - name: Start Resources
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 subjects:
   - kind: ServiceAccount
     name: dnsmasq
-    namespace: "{{ system_namespace}}"
+    namespace: "kube-system"
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: dnsmasq
-  namespace: "{{system_namespace}}"
+  namespace: "kube-system"
   labels:
     k8s-app: dnsmasq
     kubernetes.io/cluster-service: "true"
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: dnsmasq
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     kubernetes.io/cluster-service: "true"
@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: {{system_namespace}}
+  namespace: kube-system
 spec:
   ports:
     - port: 53
@@ -21,6 +21,10 @@ docker_dns_servers_strict: yes
 
 docker_container_storage_setup: false
 
+# Used to override obsoletes=0
+yum_conf: /etc/yum.conf
+docker_yum_conf: /etc/yum_docker.conf
+
 # CentOS/RedHat docker-ce repo
 docker_rh_repo_base_url: 'https://download.docker.com/linux/centos/7/$basearch/stable'
 docker_rh_repo_gpgkey: 'https://download.docker.com/linux/centos/gpg'
@@ -30,6 +30,8 @@
   tags:
     - facts
 
+- import_tasks: pre-upgrade.yml
+
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:

@@ -78,11 +80,27 @@
     dest: "/etc/yum.repos.d/docker.repo"
   when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
 
+- name: Copy yum.conf for editing
+  copy:
+    src: "{{ yum_conf }}"
+    dest: "{{ docker_yum_conf }}"
+    remote_src: yes
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
+- name: Edit copy of yum.conf to set obsoletes=0
+  lineinfile:
+    path: "{{ docker_yum_conf }}"
+    state: present
+    regexp: '^obsoletes='
+    line: 'obsoletes=0'
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
+
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     pkg: "{{item.name}}"
     force: "{{item.force|default(omit)}}"
+    conf_file: "{{item.yum_conf|default(omit)}}"
     state: present
   register: docker_task_result
   until: docker_task_result|succeeded
roles/docker/tasks/pre-upgrade.yml (new file): 20 additions
@@ -0,0 +1,20 @@
+---
+- name: Ensure old versions of Docker are not installed. | Debian
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-engine
+  when: ansible_os_family == 'Debian' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
+
+- name: Ensure old versions of Docker are not installed. | RedHat
+  package:
+    name: '{{ item }}'
+    state: absent
+  with_items:
+    - docker
+    - docker-common
+    - docker-engine
+    - docker-selinux
+  when: ansible_os_family == 'RedHat' and (docker_versioned_pkg[docker_version | string] | search('docker-ce'))
@@ -28,7 +28,9 @@ docker_package_info:
   pkg_mgr: yum
   pkgs:
     - name: "{{ docker_selinux_versioned_pkg[docker_selinux_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
     - name: "{{ docker_versioned_pkg[docker_version | string] }}"
+      yum_conf: "{{ docker_yum_conf }}"
 
 docker_repo_key_info:
   pkg_key: ''
@@ -70,8 +70,8 @@ calico_policy_image_repo: "quay.io/calico/kube-controllers"
 calico_policy_image_tag: "{{ calico_policy_version }}"
 calico_rr_image_repo: "quay.io/calico/routereflector"
 calico_rr_image_tag: "{{ calico_rr_version }}"
-hyperkube_image_repo: "quay.io/coreos/hyperkube"
-hyperkube_image_tag: "{{ kube_version }}_coreos.0"
+hyperkube_image_repo: "gcr.io/google-containers/hyperkube"
+hyperkube_image_tag: "{{ kube_version }}"
 pod_infra_image_repo: "gcr.io/google_containers/pause-amd64"
 pod_infra_image_tag: "{{ pod_infra_version }}"
 install_socat_image_repo: "xueshanf/install-socat"

@@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}"
 kibana_version: "v4.6.1"
 kibana_image_repo: "gcr.io/google_containers/kibana"
 kibana_image_tag: "{{ kibana_version }}"
-
 helm_version: "v2.8.1"
 helm_image_repo: "lachlanevenson/k8s-helm"
 helm_image_tag: "{{ helm_version }}"

@@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
 tiller_image_tag: "{{ helm_version }}"
 vault_image_repo: "vault"
 vault_image_tag: "{{ vault_version }}"
+cert_manager_version: "v0.2.3"
+cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
+cert_manager_controller_image_tag: "{{ cert_manager_version }}"
+cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
+cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
 
 downloads:
   netcheck_server:
@@ -140,18 +144,24 @@ downloads:
     repo: "{{ netcheck_server_img_repo }}"
     tag: "{{ netcheck_server_tag }}"
     sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   netcheck_agent:
     enabled: "{{ deploy_netchecker }}"
     container: true
     repo: "{{ netcheck_agent_img_repo }}"
     tag: "{{ netcheck_agent_tag }}"
     sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   etcd:
     enabled: true
     container: true
     repo: "{{ etcd_image_repo }}"
     tag: "{{ etcd_image_tag }}"
     sha256: "{{ etcd_digest_checksum|default(None) }}"
+    groups:
+      - etcd
   kubeadm:
     enabled: "{{ kubeadm_enabled }}"
     file: true

@@ -163,6 +173,8 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - k8s-cluster
   istioctl:
     enabled: "{{ istio_enabled }}"
     file: true
@@ -174,140 +186,186 @@ downloads:
     unarchive: false
     owner: "root"
     mode: "0755"
+    groups:
+      - kube-master
   hyperkube:
     enabled: true
     container: true
     repo: "{{ hyperkube_image_repo }}"
     tag: "{{ hyperkube_image_tag }}"
     sha256: "{{ hyperkube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   cilium:
     enabled: "{{ kube_network_plugin == 'cilium' }}"
     container: true
     repo: "{{ cilium_image_repo }}"
     tag: "{{ cilium_image_tag }}"
     sha256: "{{ cilium_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel:
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ flannel_image_repo }}"
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   flannel_cni:
     enabled: "{{ kube_network_plugin == 'flannel' }}"
     container: true
     repo: "{{ flannel_cni_image_repo }}"
     tag: "{{ flannel_cni_image_tag }}"
     sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calicoctl:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calicoctl_image_repo }}"
     tag: "{{ calicoctl_image_tag }}"
     sha256: "{{ calicoctl_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_node:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_node_image_repo }}"
     tag: "{{ calico_node_image_tag }}"
     sha256: "{{ calico_node_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_cni:
     enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_cni_image_repo }}"
     tag: "{{ calico_cni_image_tag }}"
     sha256: "{{ calico_cni_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_policy:
     enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
     container: true
     repo: "{{ calico_policy_image_repo }}"
     tag: "{{ calico_policy_image_tag }}"
     sha256: "{{ calico_policy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   calico_rr:
     enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
     container: true
     repo: "{{ calico_rr_image_repo }}"
     tag: "{{ calico_rr_image_tag }}"
     sha256: "{{ calico_rr_digest_checksum|default(None) }}"
+    groups:
+      - calico-rr
   weave_kube:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_kube_image_repo }}"
     tag: "{{ weave_kube_image_tag }}"
     sha256: "{{ weave_kube_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   weave_npc:
     enabled: "{{ kube_network_plugin == 'weave' }}"
     container: true
     repo: "{{ weave_npc_image_repo }}"
     tag: "{{ weave_npc_image_tag }}"
     sha256: "{{ weave_npc_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_image_repo }}"
     tag: "{{ contiv_image_tag }}"
     sha256: "{{ contiv_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   contiv_auth_proxy:
     enabled: "{{ kube_network_plugin == 'contiv' }}"
     container: true
     repo: "{{ contiv_auth_proxy_image_repo }}"
     tag: "{{ contiv_auth_proxy_image_tag }}"
     sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   pod_infra:
     enabled: true
     container: true
     repo: "{{ pod_infra_image_repo }}"
     tag: "{{ pod_infra_image_tag }}"
     sha256: "{{ pod_infra_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   install_socat:
     enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
     container: true
     repo: "{{ install_socat_image_repo }}"
     tag: "{{ install_socat_image_tag }}"
     sha256: "{{ install_socat_digest_checksum|default(None) }}"
+    groups:
+      - k8s-cluster
   nginx:
-    enabled: true
+    enabled: "{{ loadbalancer_apiserver_localhost }}"
     container: true
     repo: "{{ nginx_image_repo }}"
     tag: "{{ nginx_image_tag }}"
     sha256: "{{ nginx_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq:
     enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
     container: true
     repo: "{{ dnsmasq_image_repo }}"
     tag: "{{ dnsmasq_image_tag }}"
     sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubedns:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubedns_image_repo }}"
     tag: "{{ kubedns_image_tag }}"
     sha256: "{{ kubedns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   coredns:
     enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
     container: true
     repo: "{{ coredns_image_repo }}"
     tag: "{{ coredns_image_tag }}"
     sha256: "{{ coredns_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_nanny:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_nanny_image_repo }}"
     tag: "{{ dnsmasq_nanny_image_tag }}"
     sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   dnsmasq_sidecar:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ dnsmasq_sidecar_image_repo }}"
     tag: "{{ dnsmasq_sidecar_image_tag }}"
     sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kubednsautoscaler:
     enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
     container: true
     repo: "{{ kubednsautoscaler_image_repo }}"
     tag: "{{ kubednsautoscaler_image_tag }}"
     sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   testbox:
-    enabled: true
+    enabled: false
     container: true
     repo: "{{ test_image_repo }}"
     tag: "{{ test_image_tag }}"
@@ -318,30 +376,40 @@ downloads:
     repo: "{{ elasticsearch_image_repo }}"
     tag: "{{ elasticsearch_image_tag }}"
     sha256: "{{ elasticsearch_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   fluentd:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ fluentd_image_repo }}"
     tag: "{{ fluentd_image_tag }}"
     sha256: "{{ fluentd_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   kibana:
     enabled: "{{ efk_enabled }}"
     container: true
     repo: "{{ kibana_image_repo }}"
     tag: "{{ kibana_image_tag }}"
     sha256: "{{ kibana_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   helm:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ helm_image_repo }}"
     tag: "{{ helm_image_tag }}"
     sha256: "{{ helm_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   tiller:
     enabled: "{{ helm_enabled }}"
     container: true
     repo: "{{ tiller_image_repo }}"
     tag: "{{ tiller_image_tag }}"
     sha256: "{{ tiller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
   vault:
     enabled: "{{ cert_management == 'vault' }}"
     container: "{{ vault_deployment_type != 'host' }}"
@@ -356,6 +424,24 @@ downloads:
     unarchive: true
     url: "{{ vault_download_url }}"
     version: "{{ vault_version }}"
+    groups:
+      - vault
+  cert_manager_controller:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_controller_image_repo }}"
+    tag: "{{ cert_manager_controller_image_tag }}"
+    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
+  cert_manager_ingress_shim:
+    enabled: "{{ cert_manager_enabled }}"
+    container: true
+    repo: "{{ cert_manager_ingress_shim_image_repo }}"
+    tag: "{{ cert_manager_ingress_shim_image_tag }}"
+    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
+    groups:
+      - kube-node
 
 download_defaults:
   container: false
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 

@@ -23,6 +24,7 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   delegate_to: "{{ download_delegate }}"
   delegate_facts: yes
   run_once: yes

@@ -38,3 +40,4 @@
     - download.enabled
     - download.container
     - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
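The condition added throughout these tasks gates each download on group membership: `group_names` is Ansible's magic list of the groups the current host belongs to, and a non-empty `intersect` with `download.groups` (a truthy length) keeps the task. A minimal self-contained sketch of the same check, with made-up values:

```
# Standalone illustration; the "download" variable and its groups are invented.
- hosts: all
  vars:
    download:
      groups:
        - k8s-cluster
  tasks:
    - name: Runs only on hosts belonging to one of download.groups
      debug:
        msg: "host is in scope for this download"
      # intersect is Ansible's set-theory filter; an empty result list is falsy
      when: group_names | intersect(download.groups) | length
```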
@@ -13,6 +13,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length
 
 - name: file_download | Download item
   get_url:

@@ -28,6 +29,7 @@
   when:
     - download.enabled
     - download.file
+    - group_names | intersect(download.groups) | length
 
 - name: file_download | Extract archives
   unarchive:

@@ -40,3 +42,4 @@
     - download.enabled
     - download.file
     - download.unarchive|default(False)
+    - group_names | intersect(download.groups) | length
@@ -7,6 +7,7 @@
   when:
     - download.enabled
     - download.container
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 

@@ -17,6 +18,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 

@@ -27,6 +29,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
 
 - name: "container_download | Update the 'container_changed' fact"
   set_fact:

@@ -36,6 +39,7 @@
     - download.container
     - download_run_once
    - pull_required|default(download_always_pull)
+    - group_names | intersect(download.groups) | length
   run_once: "{{ download_run_once }}"
   tags:
     - facts

@@ -53,6 +57,7 @@
     - download.enabled
     - download.container
     - download_run_once
+    - group_names | intersect(download.groups) | length
   tags:
     - facts
 

@@ -68,6 +73,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] or download_delegate == "localhost")
     - (container_changed or not img.stat.exists)
+    - group_names | intersect(download.groups) | length
 
 - name: container_download | copy container images to ansible host
   synchronize:

@@ -87,6 +93,7 @@
     - inventory_hostname == download_delegate
     - download_delegate != "localhost"
     - saved.changed
+    - group_names | intersect(download.groups) | length
 
 - name: container_download | upload container images to nodes
   synchronize:

@@ -108,6 +115,7 @@
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
        inventory_hostname != download_delegate or
        download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade

@@ -120,6 +128,7 @@
     - download_run_once
     - (ansible_os_family not in ["CoreOS", "Container Linux by CoreOS"] and
        inventory_hostname != download_delegate or download_delegate == "localhost")
+    - group_names | intersect(download.groups) | length
   tags:
     - upload
     - upgrade
@@ -12,9 +12,9 @@ etcd_cert_group: root
 # Note: This does not set up DNS entries. It simply adds the following DNS
 # entries to the certificate
 etcd_cert_alt_names:
-  - "etcd.{{ system_namespace }}.svc.{{ dns_domain }}"
-  - "etcd.{{ system_namespace }}.svc"
-  - "etcd.{{ system_namespace }}"
+  - "etcd.kube-system.svc.{{ dns_domain }}"
+  - "etcd.kube-system.svc"
+  - "etcd.kube-system"
   - "etcd"
 
 etcd_script_dir: "{{ bin_dir }}/etcd-scripts"

@@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
 etcd_heartbeat_interval: "250"
 etcd_election_timeout: "5000"
 
-#etcd_snapshot_count: "10000"
+# etcd_snapshot_count: "10000"
 
 # Parameters for ionice
 # -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
 # -n takes an integer between 0 (highest priority) and 7 (lowest priority)
-#etcd_ionice: "-c2 -n0"
+# etcd_ionice: "-c2 -n0"
 
 etcd_metrics: "basic"
@@ -48,7 +48,7 @@
     snapshot save {{ etcd_backup_directory }}/snapshot.db
   environment:
     ETCDCTL_API: 3
-    ETCDCTL_CERT: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
   retries: 3
   delay: "{{ retry_stagger | random + 3 }}"
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - name: Configure | Check if member is in etcd-events cluster
   shell: "{{ bin_dir }}/etcdctl --no-sync --endpoints={{ etcd_events_access_addresses }} member list | grep -q {{ etcd_access_address }}"

@@ -22,8 +22,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - name: Configure | Copy etcd.service systemd file
   template:
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -7,8 +7,8 @@
   delay: "{{ retry_stagger | random + 3 }}"
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - include_tasks: refresh_config.yml
   vars:

@@ -43,5 +43,5 @@
     - facts
   when: target_node == inventory_hostname
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -29,13 +29,13 @@
   tags:
     - upgrade
 
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: configure.yml
+- include_tasks: configure.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
 
 - name: Restart etcd if certs changed

@@ -68,8 +68,8 @@
 # After etcd cluster is assembled, make sure that
 # initial state of the cluster is in `existing`
 # state insted of `new`.
-- import_tasks: set_cluster_health.yml
+- include_tasks: set_cluster_health.yml
   when: is_etcd_master and etcd_cluster_setup
 
-- import_tasks: refresh_config.yml
+- include_tasks: refresh_config.yml
   when: is_etcd_master and etcd_cluster_setup
@@ -9,8 +9,8 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
 
 - name: Configure | Check if etcd-events cluster is healthy
   shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_events_access_addresses }} cluster-health | grep -q 'cluster is healthy'"

@@ -22,5 +22,5 @@
   tags:
     - facts
   environment:
-    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
-    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
+    ETCDCTL_CERT_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}.pem"
+    ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ inventory_hostname }}-key.pem"
@@ -1,4 +1,4 @@
-[req]
+{% set counter = {'dns': 2,'ip': 1,} %}{% macro increment(dct, key, inc=1)%}{% if dct.update({key: dct[key] + inc}) %} {% endif %}{% endmacro %}[req]
 req_extensions = v3_req
 distinguished_name = req_distinguished_name
 

@@ -25,19 +25,18 @@ authorityKeyIdentifier=keyid:always,issuer
 [alt_names]
 DNS.1 = localhost
 {% for host in groups['etcd'] %}
-DNS.{{ 1 + loop.index }} = {{ host }}
+DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
 {% endfor %}
-{% if loadbalancer_apiserver is defined %}
-{% set idx = groups['etcd'] | length | int + 2 %}
-DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
+{% if apiserver_loadbalancer_domain_name is defined %}
+DNS.{{ counter["dns"] }} = {{ apiserver_loadbalancer_domain_name }}{{ increment(counter, 'dns') }}
 {% endif %}
-{% set idx = groups['etcd'] | length | int + 3 %}
 {% for etcd_alt_name in etcd_cert_alt_names %}
-DNS.{{ idx + 1 + loop.index }} = {{ etcd_alt_name }}
+DNS.{{ counter["dns"] }} = {{ etcd_alt_name }}{{ increment(counter, 'dns') }}
 {% endfor %}
 {% for host in groups['etcd'] %}
-IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
-IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
+{% if hostvars[host]['access_ip'] is defined %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['access_ip'] }}{{ increment(counter, 'ip') }}
+{% endif %}
+IP.{{ counter["ip"] }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}{{ increment(counter, 'ip') }}
 {% endfor %}
-{% set idx = groups['etcd'] | length | int * 2 + 1 %}
-IP.{{ idx }} = 127.0.0.1
+IP.{{ counter["ip"] }} = 127.0.0.1
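The rewritten template swaps hand-computed indices for a dict-based counter. A plain `{% set %}` cannot mutate a variable across loop iterations in Jinja2, so the macro mutates a dict in place; `dict.update()` returns a falsy value, which is why the surrounding `if` emits nothing. A minimal sketch of the idiom on its own, with invented sample data:

```
{# Counter idiom used above; the host names are made up for illustration. #}
{% set counter = {'dns': 1} %}
{% macro increment(dct, key, inc=1) %}{% if dct.update({key: dct[key] + inc}) %}{% endif %}{% endmacro %}
{% for host in ["etcd1", "etcd2", "etcd3"] %}
DNS.{{ counter["dns"] }} = {{ host }}{{ increment(counter, 'dns') }}
{% endfor %}
{# Renders DNS.1 = etcd1, DNS.2 = etcd2, DNS.3 = etcd3 #}
```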
@@ -2,7 +2,7 @@
 - name: Kubernetes Apps | Delete old CoreDNS resources
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent

@@ -16,7 +16,7 @@
 - name: Kubernetes Apps | Delete kubeadm CoreDNS
   kube:
     name: "coredns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: absent

@@ -28,7 +28,7 @@
 - name: Kubernetes Apps | Delete old KubeDNS resources
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent

@@ -41,7 +41,7 @@
 - name: Kubernetes Apps | Delete kubeadm KubeDNS
   kube:
     name: "kube-dns"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item }}"
     state: absent
@@ -22,7 +22,7 @@
 - name: Kubernetes Apps | Start dashboard
   kube:
     name: "{{ item.item.name }}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{ bin_dir }}/kubectl"
     resource: "{{ item.item.type }}"
     filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
|||
- name: Kubernetes Apps | Start Resources
|
||||
kube:
|
||||
name: "{{ item.item.name }}"
|
||||
namespace: "{{ system_namespace }}"
|
||||
namespace: "kube-system"
|
||||
kubectl: "{{ bin_dir }}/kubectl"
|
||||
resource: "{{ item.item.type }}"
|
||||
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||
|
@ -50,6 +50,10 @@
|
|||
- dns_mode != 'none'
|
||||
- inventory_hostname == groups['kube-master'][0]
|
||||
- not item|skipped
|
||||
register: resource_result
|
||||
until: resource_result|succeeded
|
||||
retries: 4
|
||||
delay: 5
|
||||
tags:
|
||||
- dnsmasq
|
||||
|
||||
|
|
|
@@ -15,4 +15,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 data:
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: coredns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: coredns{{ coredns_ordinal_suffix | default('') }}
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: coredns{{ coredns_ordinal_suffix | default('') }}
     kubernetes.io/cluster-service: "true"
@@ -25,7 +25,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard-certs
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 type: Opaque
 
 ---

@@ -37,7 +37,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Dashboard Role & Role Binding ------------------- #

@@ -46,7 +46,7 @@ kind: Role
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
 - apiGroups: [""]

@@ -81,7 +81,7 @@ apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name: kubernetes-dashboard-minimal
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role

@@ -89,7 +89,7 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 
 ---
 # ------------------- Gross Hack For anonymous auth through api proxy ------------------- #

@@ -103,7 +103,7 @@ rules:
   resources: ["services/proxy"]
   resourceNames: ["https:kubernetes-dashboard:"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/{{ system_namespace }}/services/https:kubernetes-dashboard:/proxy/*"]
+- nonResourceURLs: ["/ui", "/ui/*", "/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/*"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
 
 ---

@@ -128,7 +128,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   replicas: 1
   revisionHistoryLimit: 10

@@ -200,7 +200,7 @@ metadata:
   labels:
     k8s-app: kubernetes-dashboard
   name: kubernetes-dashboard
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 spec:
   ports:
   - port: 443
@@ -17,7 +17,7 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 rules:
   - apiGroups: [""]
     resources: ["nodes"]
@@ -17,11 +17,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: cluster-proportional-autoscaler
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-proportional-autoscaler
@@ -17,4 +17,4 @@ kind: ServiceAccount
 apiVersion: v1
 metadata:
   name: cluster-proportional-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
@@ -17,7 +17,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kubedns-autoscaler
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kubedns-autoscaler
     kubernetes.io/cluster-service: "true"

@@ -40,7 +40,7 @@ spec:
             memory: "10Mi"
         command:
           - /cluster-proportional-autoscaler
-          - --namespace={{ system_namespace }}
+          - --namespace=kube-system
           - --configmap=kubedns-autoscaler
           # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
           - --target=Deployment/kube-dns
@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-dns
-  namespace: "{{system_namespace}}"
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kube-dns
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@@ -126,32 +126,3 @@
     - kube_version | version_compare('v1.9.3', '<=')
     - inventory_hostname == groups['kube-master'][0]
   tags: vsphere
-
-# This is not a cluster role, but should be run after kubeconfig is set on master
-- name: Write kube system namespace manifest
-  template:
-    src: namespace.j2
-    dest: "{{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Check if kube system namespace exists
-  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
-  register: 'kubesystem'
-  changed_when: False
-  failed_when: False
-  when: inventory_hostname == groups['kube-master'][0]
-  tags:
-    - apps
-
-- name: Create kube system namespace
-  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
-  retries: 4
-  delay: "{{ retry_stagger | random + 3 }}"
-  register: create_system_ns
-  until: create_system_ns.rc == 0
-  changed_when: False
-  when: inventory_hostname == groups['kube-master'][0] and kubesystem.rc != 0
-  tags:
-    - apps
@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: "{{system_namespace}}"
+  name: "kube-system"
@@ -10,7 +10,7 @@
   when: rbac_enabled
 
 - name: "ElasticSearch | Create Serviceaccount and Clusterrolebinding (RBAC)"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/{{ item }} -n kube-system"
   with_items:
     - "efk-sa.yml"
     - "efk-clusterrolebinding.yml"

@@ -24,7 +24,7 @@
   register: es_deployment_manifest
 
 - name: "ElasticSearch | Create ES deployment"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-deployment.yaml -n kube-system"
   run_once: true
   when: es_deployment_manifest.changed
 

@@ -35,6 +35,6 @@
   register: es_service_manifest
 
 - name: "ElasticSearch | Create ES service"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n kube-system"
   run_once: true
   when: es_service_manifest.changed
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: efk
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: efk
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: elasticsearch-logging-v1
-  namespace: "{{ system_namespace }}"
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: "{{ elasticsearch_image_tag }}"
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"
@@ -17,6 +17,6 @@
   register: fluentd_ds_manifest
 
 - name: "Fluentd | Create fluentd daemonset"
-  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
+  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n kube-system"
   run_once: true
   when: fluentd_ds_manifest.changed
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ConfigMap
 metadata:
   name: fluentd-config
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
 data:
   {{ fluentd_config_file }}: |
     # This configuration file for Fluentd / td-agent is used
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: "fluentd-es-v{{ fluentd_version }}"
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: fluentd-es
     kubernetes.io/cluster-service: "true"
@@ -10,7 +10,7 @@
     filename: "{{kube_config_dir}}/kibana-deployment.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "deployment"
     state: "latest"
   with_items: "{{ kibana_deployment_manifest.changed }}"

@@ -27,7 +27,7 @@
     filename: "{{kube_config_dir}}/kibana-service.yaml"
     kubectl: "{{bin_dir}}/kubectl"
     name: "kibana-logging"
-    namespace: "{{system_namespace}}"
+    namespace: "kube-system"
     resource: "svc"
     state: "latest"
   with_items: "{{ kibana_service_manifest.changed }}"
@@ -4,7 +4,7 @@ apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: "{{ system_namespace }}"
+  namespace: "kube-system"
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"
@@ -2,7 +2,7 @@
 cephfs_provisioner_image_repo: quay.io/kubespray/cephfs-provisioner
 cephfs_provisioner_image_tag: 92295a30
 
-cephfs_provisioner_namespace: "{{ system_namespace }}"
+cephfs_provisioner_namespace: "kube-system"
 cephfs_provisioner_cluster: ceph
 cephfs_provisioner_monitors: []
 cephfs_provisioner_admin_id: admin
@@ -2,7 +2,7 @@
 local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
 local_volume_provisioner_image_tag: v2.0.0
 
-local_volume_provisioner_namespace: "{{ system_namespace }}"
+local_volume_provisioner_namespace: "kube-system"
 local_volume_provisioner_base_dir: /mnt/disks
 local_volume_provisioner_mount_dir: /mnt/disks
 local_volume_provisioner_storage_class: local-storage
@@ -18,7 +18,7 @@
 - name: Helm | Apply Helm Manifests (RBAC)
   kube:
     name: "{{item.item.name}}"
-    namespace: "{{ system_namespace }}"
+    namespace: "kube-system"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -28,7 +28,7 @@
 
 - name: Helm | Install/upgrade helm
   command: >
-    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace={{ system_namespace }}
+    {{ bin_dir }}/helm init --upgrade --tiller-image={{ tiller_image_repo }}:{{ tiller_image_tag }} --tiller-namespace=kube-system
     {% if helm_skip_refresh %} --skip-refresh{% endif %}
     {% if helm_stable_repo_url is defined %} --stable-repo-url {{ helm_stable_repo_url }}{% endif %}
     {% if rbac_enabled %} --service-account=tiller{% endif %}
@@ -3,11 +3,11 @@ kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
 subjects:
   - kind: ServiceAccount
     name: tiller
-    namespace: {{ system_namespace }}
+    namespace: kube-system
 roleRef:
   kind: ClusterRole
   name: cluster-admin
@@ -3,6 +3,6 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: tiller
-  namespace: {{ system_namespace }}
+  namespace: kube-system
   labels:
     kubernetes.io/cluster-service: "true"
@@ -0,0 +1,17 @@
+Deployment files
+================
+
+This directory contains example deployment manifests for cert-manager that can
+be used in place of the official Helm chart.
+
+This is useful if you are deploying cert-manager into an environment without
+Helm, or want to inspect a 'bare minimum' deployment.
+
+Where do these come from?
+-------------------------
+
+The manifests in these subdirectories are generated from the Helm chart
+automatically. The `values.yaml` files used to configure cert-manager can be
+found in [`hack/deploy`](../../hack/deploy/).
+
+They are automatically generated by running `./hack/update-deploy-gen.sh`.
@@ -0,0 +1,6 @@
+---
+cert_manager_namespace: "cert-manager"
+cert_manager_cpu_requests: 10m
+cert_manager_cpu_limits: 30m
+cert_manager_memory_requests: 32Mi
+cert_manager_memory_limits: 200Mi
@@ -0,0 +1,38 @@
+---
+
+- name: Cert Manager | Create addon dir
+  file:
+    path: "{{ kube_config_dir }}/addons/cert_manager"
+    state: directory
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Cert Manager | Create manifests
+  template:
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
+  with_items:
+    - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
+    - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
+    - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
+    - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
+    - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
+    - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
+    - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
+    - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
+  register: cert_manager_manifests
+  when:
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: Cert Manager | Apply manifests
+  kube:
+    name: "{{ item.item.name }}"
+    namespace: "{{ cert_manager_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}"
+    state: "latest"
+  with_items: "{{ cert_manager_manifests.results }}"
+  when:
+    - inventory_hostname == groups['kube-master'][0]
@@ -0,0 +1,21 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: certificates.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  scope: Namespaced
+  names:
+    kind: Certificate
+    plural: certificates
+    shortNames:
+      - cert
+      - certs
+
@@ -0,0 +1,17 @@
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterissuers.certmanager.k8s.io
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+spec:
+  group: certmanager.k8s.io
+  version: v1alpha1
+  names:
+    kind: ClusterIssuer
+    plural: clusterissuers
+  scope: Cluster
@@ -0,0 +1,25 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+rules:
+  - apiGroups: ["certmanager.k8s.io"]
+    resources: ["certificates", "issuers", "clusterissuers"]
+    verbs: ["*"]
+  - apiGroups: [""]
+    # TODO: remove endpoints once 0.4 is released. We include it here in case
+    # users use the 'master' version of the Helm chart with a 0.2.x release of
+    # cert-manager that still performs leader election with Endpoint resources.
+    # We advise users don't do this, but some will anyway and this will reduce
+    # friction.
+    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
+    verbs: ["*"]
+  - apiGroups: ["extensions"]
+    resources: ["ingresses"]
+    verbs: ["*"]
@@ -0,0 +1,18 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: cert-manager
+  labels:
+    app: cert-manager
+    chart: cert-manager-0.2.5
+    release: cert-manager
+    heritage: Tiller
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cert-manager
+subjects:
+  - name: cert-manager
+    namespace: {{ cert_manager_namespace }}
+    kind: ServiceAccount
@@ -0,0 +1,51 @@
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: cert-manager
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: cert-manager
        release: cert-manager
      annotations:
    spec:
      serviceAccountName: cert-manager
      containers:
        - name: cert-manager
          image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          args:
            - --cluster-resource-namespace=$(POD_NAMESPACE)
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          resources:
            requests:
              cpu: {{ cert_manager_cpu_requests }}
              memory: {{ cert_manager_memory_requests }}
            limits:
              cpu: {{ cert_manager_cpu_limits }}
              memory: {{ cert_manager_memory_limits }}

        - name: ingress-shim
          image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          resources:
            requests:
              cpu: {{ cert_manager_cpu_requests }}
              memory: {{ cert_manager_memory_requests }}
            limits:
              cpu: {{ cert_manager_cpu_limits }}
              memory: {{ cert_manager_memory_limits }}
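The template defers image and sizing choices to Ansible variables. One illustrative set of values for the names used above (the concrete repositories, tags, and sizes here are assumptions, not taken from this diff):

```
cert_manager_controller_image_repo: quay.io/jetstack/cert-manager-controller
cert_manager_controller_image_tag: v0.2.5
cert_manager_ingress_shim_image_repo: quay.io/jetstack/cert-manager-ingress-shim
cert_manager_ingress_shim_image_tag: v0.2.5
cert_manager_cpu_requests: 10m
cert_manager_cpu_limits: 30m
cert_manager_memory_requests: 32M
cert_manager_memory_limits: 200M
```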
@@ -0,0 +1,17 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: issuers.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  names:
    kind: Issuer
    plural: issuers
  scope: Namespaced
@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: {{ cert_manager_namespace }}
  labels:
    name: {{ cert_manager_namespace }}
@@ -0,0 +1,11 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cert-manager
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
@@ -20,6 +20,9 @@ spec:
      labels:
        k8s-app: ingress-nginx
        version: v{{ ingress_nginx_controller_image_tag }}
      annotations:
        prometheus.io/port: '10254'
        prometheus.io/scrape: 'true'
    spec:
{% if ingress_nginx_host_network %}
      hostNetwork: true

@@ -78,3 +81,4 @@ spec:
{% if rbac_enabled %}
      serviceAccountName: ingress-nginx
{% endif %}
@@ -6,3 +6,10 @@ dependencies:
    tags:
      - apps
      - ingress-nginx
      - ingress-controller

  - role: kubernetes-apps/ingress_controller/cert_manager
    when: cert_manager_enabled
    tags:
      - apps
      - cert-manager
      - ingress-controller
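Since the new dependency is gated on `cert_manager_enabled`, turning the addon on is an inventory change; the namespace value below is an illustrative assumption:

```
cert_manager_enabled: true
cert_manager_namespace: "cert-manager"   # assumed value; any namespace works
```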
@@ -2,7 +2,7 @@
- name: Start Calico resources
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
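For reference, the task after this change renders to something like the following; the item and the `bin_dir`/`kube_config_dir` values are illustrative defaults, not part of the diff. The same hard-coded `kube-system` substitution repeats for Canal, Cilium, Contiv, Flannel, and Weave below.

```
- name: Start Calico resources
  kube:
    name: "calico-node"
    namespace: "kube-system"
    kubectl: "/usr/local/bin/kubectl"
    resource: "ds"
    filename: "/etc/kubernetes/calico-node.yml"
```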
@@ -2,7 +2,7 @@
- name: Canal | Start Resources
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -2,7 +2,7 @@
- name: Cilium | Start Resources
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"

@@ -11,7 +11,7 @@
  when: inventory_hostname == groups['kube-master'][0] and not item|skipped

- name: Cilium | Wait for pods to run
  command: "{{bin_dir}}/kubectl -n {{system_namespace}} get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'"
  register: pods_not_ready
  until: pods_not_ready.stdout.find("cilium")==-1
  retries: 30
@@ -3,7 +3,7 @@
- name: Contiv | Create Kubernetes resources
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
@@ -2,7 +2,7 @@
- name: Flannel | Start Resources
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -5,7 +5,7 @@
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/weave-net.yml"
    resource: "ds"
    namespace: "{{system_namespace}}"
    namespace: "kube-system"
    state: "latest"
  when: inventory_hostname == groups['kube-master'][0]
@@ -12,7 +12,7 @@
    name: calico-policy-controller
    kubectl: "{{bin_dir}}/kubectl"
    resource: rs
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    state: absent
  run_once: true

@@ -32,7 +32,7 @@
- name: Start of Calico kube controllers
  kube:
    name: "{{item.item.name}}"
    namespace: "{{ system_namespace }}"
    namespace: "kube-system"
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.file}}"
@@ -2,7 +2,7 @@ apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: {{ system_namespace }}
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
    kubernetes.io/cluster-service: "true"

@@ -15,7 +15,7 @@ spec:
  template:
    metadata:
      name: calico-kube-controllers
      namespace: {{ system_namespace }}
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
        k8s-app: calico-kube-controllers
@@ -3,7 +3,7 @@ kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
  namespace: {{ system_namespace }}
  namespace: kube-system
rules:
  - apiGroups:
      - ""
@@ -10,4 +10,4 @@ roleRef:
subjects:
  - kind: ServiceAccount
    name: calico-kube-controllers
    namespace: {{ system_namespace }}
    namespace: kube-system
@@ -3,6 +3,6 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: {{ system_namespace }}
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
@@ -4,6 +4,6 @@ registry_image_tag: 2.6
registry_proxy_image_repo: gcr.io/google_containers/kube-registry-proxy
registry_proxy_image_tag: 0.4

registry_namespace: "{{ system_namespace }}"
registry_namespace: "kube-system"
registry_storage_class: ""
registry_disk_size: "10Gi"
@@ -44,5 +44,5 @@
  when: needs_rotation

- name: Rotate Tokens | Delete pods in system namespace
  command: "{{ bin_dir }}/kubectl delete pods -n {{ system_namespace }} --all"
  command: "{{ bin_dir }}/kubectl delete pods -n kube-system --all"
  when: needs_rotation
@@ -96,4 +96,5 @@ volume_cross_zone_attachment: false
## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
kube_encryption_algorithm: "aescbc" # Must be either: aescbc, secretbox or aesgcm
# Must be either: aescbc, secretbox or aesgcm
kube_encryption_algorithm: "aescbc"
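For context, the chosen algorithm becomes a provider entry in the encryption config the apiserver consumes. A sketch of that file's shape for `aescbc` in a 1.9-era cluster (the exact template kubespray renders is not shown in this diff, and the key below is a placeholder):

```
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <base64 of the generated kube_encrypt_token>
      - identity: {}
```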
@@ -9,4 +9,6 @@
    - {src: apiserver-key.pem, dest: apiserver.key}
    - {src: ca.pem, dest: ca.crt}
    - {src: ca-key.pem, dest: ca.key}
    - {src: service-account-key.pem, dest: sa.pub}
    - {src: service-account-key.pem, dest: sa.key}
  register: kubeadm_copy_old_certs
@@ -30,4 +30,7 @@
  with_items:
    - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
  when: kube_apiserver_manifest_replaced.changed
  run_once: true
  register: remove_master_container
  retries: 4
  until: remove_master_container.rc == 0
  delay: 5
@@ -38,7 +38,7 @@ apiServerExtraArgs:
  apiserver-count: "{{ kube_apiserver_count }}"
{% if kube_version | version_compare('v1.9', '>=') %}
  endpoint-reconciler-type: lease
{% endif %}
{% endif %}
  service-node-port-range: {{ kube_apiserver_node_port_range }}
  kubelet-preferred-address-types: "{{ kubelet_preferred_address_types }}"
{% if kube_basic_auth|default(true) %}

@@ -90,3 +90,6 @@ apiServerCertSANs:
{% endfor %}
certificatesDir: {{ kube_config_dir }}/ssl
unifiedControlPlaneImage: "{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }}"
{% if kube_override_hostname|default('') %}
nodeName: {{ kube_override_hostname }}
{% endif %}
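Taken together, a v1.9 master with `kube_override_hostname` set would render roughly this fragment (every concrete value below is illustrative, not from the diff):

```
apiServerExtraArgs:
  apiserver-count: "3"
  endpoint-reconciler-type: lease
  service-node-port-range: 30000-32767
certificatesDir: /etc/kubernetes/ssl
unifiedControlPlaneImage: "gcr.io/google-containers/hyperkube:v1.9.2"
nodeName: node1
```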
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: {{system_namespace}}
  namespace: kube-system
  labels:
    k8s-app: kube-apiserver
    kubespray: v2

@@ -63,7 +63,7 @@ spec:
{% if kube_token_auth|default(true) %}
    - --token-auth-file={{ kube_token_dir }}/known_tokens.csv
{% endif %}
    - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --service-account-key-file={{ kube_cert_dir }}/service-account-key.pem
{% if kube_oidc_auth|default(false) and kube_oidc_url is defined and kube_oidc_client_id is defined %}
    - --oidc-issuer-url={{ kube_oidc_url }}
    - --oidc-client-id={{ kube_oidc_client_id }}
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: {{system_namespace}}
  namespace: kube-system
  labels:
    k8s-app: kube-controller-manager
  annotations:

@@ -29,7 +29,7 @@ spec:
    - controller-manager
    - --kubeconfig={{ kube_config_dir }}/kube-controller-manager-kubeconfig.yaml
    - --leader-elect=true
    - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem
    - --service-account-private-key-file={{ kube_cert_dir }}/service-account-key.pem
    - --root-ca-file={{ kube_cert_dir }}/ca.pem
    - --cluster-signing-cert-file={{ kube_cert_dir }}/ca.pem
    - --cluster-signing-key-file={{ kube_cert_dir }}/ca-key.pem
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: {{ system_namespace }}
  namespace: kube-system
  labels:
    k8s-app: kube-scheduler
  annotations:
@ -1,6 +0,0 @@
|
|||
---
|
||||
namespace_kubesystem:
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: "{{system_namespace}}"
|
|
@@ -134,6 +134,19 @@
  tags:
    - kube-proxy

- name: Write cloud-config
  template:
    src: "{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
  when:
    - cloud_provider is defined
    - cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
  notify: restart kubelet
  tags:
    - cloud-provider

# reload-systemd
- meta: flush_handlers
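Triggering the new task takes only a provider selection in the inventory; the rendered source is `<provider>-cloud-config.j2`, and provider credentials are assumed to be supplied elsewhere:

```
cloud_provider: openstack   # the task only fires for openstack, azure, or vsphere
```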
@@ -81,18 +81,26 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}

{# Kubelet node labels #}
{% set role_node_labels = [] %}
{% if inventory_hostname in groups['kube-master'] %}
{% set node_labels %}--node-labels=node-role.kubernetes.io/master=true{% endset %}
{% do role_node_labels.append('node-role.kubernetes.io/master=true') %}
{% if not standalone_kubelet|bool %}
{% set node_labels %}{{ node_labels }},node-role.kubernetes.io/node=true{% endset %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% elif inventory_hostname in groups['kube-ingress']|default([]) %}
{% set node_labels %}--node-labels=node-role.kubernetes.io/ingress=true{% endset %}
{% do role_node_labels.append('node-role.kubernetes.io/ingress=true') %}
{% else %}
{% set node_labels %}--node-labels=node-role.kubernetes.io/node=true{% endset %}
{% do role_node_labels.append('node-role.kubernetes.io/node=true') %}
{% endif %}
{% set inventory_node_labels = [] %}
{% if node_labels is defined %}
{% for labelname, labelvalue in node_labels.iteritems() %}
{% do inventory_node_labels.append(labelname + '=' + labelvalue) %}
{% endfor %}
{% endif %}
{% set all_node_labels = role_node_labels + inventory_node_labels %}

KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ kube_reserved }} --node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave", "contiv", "cilium"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
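The rewrite replaces the old per-role `--node-labels` string with two lists joined once, which is what lets an inventory-defined `node_labels` dict merge with the role labels. For a plain worker with one custom label (illustrative value), the effect is:

```
# group_vars (illustrative):
node_labels:
  disktype: ssd
# rendered flag inside KUBELET_ARGS for that node:
#   --node-labels=node-role.kubernetes.io/node=true,disktype=ssd
```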
@@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
  namespace: {{system_namespace}}
  namespace: kube-system
  labels:
    k8s-app: kube-proxy
  annotations:

@@ -48,7 +48,6 @@ spec:
{% elif kube_proxy_mode == 'ipvs' %}
    - --masquerade-all
    - --feature-gates=SupportIPVSProxyMode=true
    - --proxy-mode=ipvs
    - --ipvs-min-sync-period=5s
    - --ipvs-sync-period=5s
    - --ipvs-scheduler=rr
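With `--masquerade-all` dropped, the ipvs branch now adds only the flags shown. Selecting the mode remains a single variable, named per the template's conditional (the iptables default is an assumption):

```
kube_proxy_mode: ipvs   # assumed default is iptables
```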
Some files were not shown because too many files have changed in this diff.