c12s-kubespray/roles/download/defaults/main.yml

---
local_release_dir: /tmp/releases
# Used to only evaluate vars from download role
skip_downloads: false
# If set to true, downloads are performed only once. This does not work on
# Container Linux by CoreOS unless download_localhost is true and localhost
# runs a different OS type. The default compress level is 1 (fastest).
download_run_once: False
download_compress: 1
# If set to true, container images will be downloaded.
download_container: True
# If set to true, localhost is used for download_run_once mode (this requires
# docker and sudo access to docker). You may want this option for local
# caching of docker images or for Container Linux by CoreOS cluster nodes.
# Otherwise, the first node in the kube-master group is used to store images
# in download_run_once mode.
download_localhost: False
# Always pull images if set to True. Otherwise check by the repo's tag/digest.
download_always_pull: False
# Some problems may occur when downloading files over an https proxy due to
# an Ansible bug: https://github.com/ansible/ansible/issues/32750. Set this
# variable to False to disable SSL validation in the get_url module. Note
# that kubespray will still perform checksum validation.
download_validate_certs: True
# Use the first kube-master if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"
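# Purely illustrative group_vars override (hypothetical values, not defaults)
# that caches all downloads once on the Ansible host:
# download_run_once: true
# download_localhost: true
# download_compress: 9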
# Arch of Docker images and needed packages
image_arch: "{{ host_architecture | default('amd64') }}"
# Versions
kube_version: v1.12.1
kubeadm_version: "{{ kube_version }}"
etcd_version: v3.2.24
# Kubernetes image repo definition
kube_image_repo: "gcr.io/google-containers"
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v3.1.3"
calico_ctl_version: "v3.1.3"
calico_cni_version: "v3.1.3"
calico_policy_version: "v3.1.3"
calico_rr_version: "v0.6.1"
flannel_version: "v0.10.0"
flannel_cni_version: "v0.3.0"
vault_version: 0.10.1
weave_version: "2.4.1"
pod_infra_version: 3.1
contiv_version: 1.2.1
cilium_version: "v1.2.0"
kube_router_version: "v0.2.1"
# Download URLs
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64/hyperkube"
# Checksums
hyperkube_checksums:
v1.12.1: 4aa23cfb2fc2e2e4d0cbe0d83a648c38e4baabd6c66f5cdbbb40cbc7582fdc74
v1.12.0: f80336201f3152a5307c01f8a7206847398dde15c69b3d20c76a7d9520b60daf
v1.11.3: dac8da16dd6688e52b5dc510f5dd0a20b54350d52fb27ceba2f018ba2c8be692
v1.11.2: d727f8cae3fc26b1add9b4ff0d4d9b99605544ff7fb3baeecdca394362adbfb8
v1.11.1: 019ce1ecf4c6a70c06a7f4ef107443351458b4d9e6b9ce4a436bfbfbef93feea
v1.11.0: 7e191c164dc2c942abd37e4b50846e0be31ca959afffeff6b034beacbc2a106a
v1.10.8: f8a68514a6c858089f44ec93b2ffb2d764ea67d3b02b19112348f73ffcfe4386
v1.10.7: 13e25eb39467014fd169f38b7cd6bec8ff55525b8001c7abba85957e6470b6cc
v1.10.6: 0daa34fa58470e5f20def10d3dd544922c28c558719d3338ad8c524154c91257
v1.10.5: 1a53456f9d33a7c07adb1636f20f1d0b92b8e7647063a70d0ce134a238e680fe
v1.10.4: 16e36693c15494036d930139a749ec1bc492b7fefa2c3adc1abbe8f38178ae7c
v1.10.3: e807753dc309635902a56069ee06fc390944ef034b72c53b2e1e51d0c9ead8a3
v1.10.2: 3843fb594a18c4a64d77736bab72000ec4b8c4ddf178e20ec3249f709e9ed9c1
v1.10.1: 6e0642ad6bae68dc81b8d1c9efa18e265e17e23da1895862823cafac08c0344c
v1.10.0: b5575b2fb4266754c1675b8cd5d9b6cac70f3fee7a05c4e80da3a9e83e58c57e
kubeadm_checksums:
v1.12.1: 5d95efd65aad398d85a9802799f36410ae7a95f9cbe73c8b10d2213c10a6d7be
v1.12.0: 463fb058b7fa2591fb01f29f2451b054f6cbaa0f8a20394b4a4eb5d68473176f
v1.11.3: 422a7a32ed9a7b1eaa2a4f9d121674dfbe80eb41e206092c13017d097f75aaec
v1.11.2: 6b17720a65b8ff46efe92a5544f149c39a221910d89939838d75581d4e6924c0
v1.11.1: 425ec24b95f7217ee06d1588aba22f206a5829f8c6a5352c2862368552361fe6
v1.11.0: 0000478fc59a24ec1727de744188d13c4d702a644954132efa9d9954371b3553
v1.10.8: 42660875dd94c93267bd2f567c67d692b362bd143d7502967a62c5474b2b25b8
v1.10.7: cdeb07fd3705e973800c4aa0b8a510d5dba1de8e1039428cfebdaf3d93e332b6
v1.10.6: e1d49a6b33b384f681468add2e9ee08552069ae0d6b0ad59e1c943ddbaeac3fa
v1.10.5: f231d4bcc9f2ed15597272e5359e380cc760c0b57a1f7cb97ce2bbab5df774e0
v1.10.4: 7e1169bbbeed973ab402941672dec957638dea5952a1e8bc89a37d5e709cc4b4
v1.10.3: b2a6f0764b89a4a13a3da4471af943ce98efeb29e2913c9e7880fe27f4f43a5f
v1.10.2: 394d7d340214c91d669186cf4f2110d8eb840ca965399b4d8b22d0545a60e377
v1.10.1: 012e48fb92b1c22543b12ab2db7d780777972043287404c98cca4d2c6ec964ec
v1.10.0: ebbac985834289037b544523c3e2f39bb44bea938aca9d9e88ef7e880fb8472f
etcd_binary_checksum: 947849dbcfa13927c81236fb76a7c01d587bbab42ab1e807184cd91b026ebed7
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
hyperkube_binary_checksum: "{{ hyperkube_checksums[kube_version] }}"
kubeadm_binary_checksum: "{{ kubeadm_checksums[kubeadm_version] }}"
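# The two lookups above fail for a kube_version or kubeadm_version that is
# missing from the maps, so pin only listed releases or add the matching
# sha256 entries first. Hypothetical example (placeholder, not a real digest):
# hyperkube_checksums:
#   v1.12.2: "<sha256 of the matching hyperkube binary>"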
# Containers
# In some cases we need a way to set --registry-mirror or --insecure-registry
# for docker; this helps a lot with local private development or bare metal
# environments. To deploy a kubernetes cluster in such a setup, provide the
# address of your own private registry, pass the --registry-mirror or
# --insecure-registry option to docker, and adjust the image repo variables
# below accordingly.
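# Illustration only (registry.local:5000 is a made-up address, not a default):
# etcd_image_repo: "registry.local:5000/coreos/etcd"
# kube_image_repo: "registry.local:5000/google-containers"
# ...with the docker daemon started using --insecure-registry=registry.local:5000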
etcd_image_repo: "quay.io/coreos/etcd"
etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
flannel_cni_image_tag: "{{ flannel_cni_version }}"
calicoctl_image_repo: "quay.io/calico/ctl"
calicoctl_image_tag: "{{ calico_ctl_version }}"
calico_node_image_repo: "quay.io/calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "quay.io/calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "quay.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "quay.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
hyperkube_image_repo: "{{ kube_image_repo }}/hyperkube-{{ image_arch }}"
hyperkube_image_tag: "{{ kube_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "xueshanf/install-socat"
install_socat_image_tag: "latest"
netcheck_version: "v1.2.2"
netcheck_agent_image_repo: "mirantis/k8s-netchecker-agent"
netcheck_agent_image_tag: "{{ netcheck_version }}"
netcheck_server_image_repo: "mirantis/k8s-netchecker-server"
netcheck_server_image_tag: "{{ netcheck_version }}"
weave_kube_image_repo: "docker.io/weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
weave_npc_image_tag: "{{ weave_version }}"
contiv_image_repo: "contiv/netplugin"
contiv_image_tag: "{{ contiv_version }}"
contiv_init_image_repo: "contiv/netplugin-init"
contiv_init_image_tag: "latest"
contiv_auth_proxy_image_repo: "contiv/auth_proxy"
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
contiv_etcd_init_image_repo: "ferest/etcd-initer"
contiv_etcd_init_image_tag: latest
contiv_ovs_image_repo: "contiv/ovs"
contiv_ovs_image_tag: "latest"
cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
kube_router_image_repo: "cloudnativelabs/kube-router"
kube_router_image_tag: "{{ kube_router_version }}"
nginx_image_repo: nginx
nginx_image_tag: 1.13
dnsmasq_version: 2.78
dnsmasq_image_repo: "andyshinn/dnsmasq"
dnsmasq_image_tag: "{{ dnsmasq_version }}"
kubedns_version: 1.14.13
kubedns_image_repo: "gcr.io/google_containers/k8s-dns-kube-dns-{{ image_arch }}"
kubedns_image_tag: "{{ kubedns_version }}"
coredns_version: "1.2.2"
coredns_image_repo: "gcr.io/google-containers/coredns"
coredns_image_tag: "{{ coredns_version }}{%- if image_arch != 'amd64' -%}__{{ image_arch}}_linux{%- endif -%}"
dnsmasq_nanny_image_repo: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-{{ image_arch }}"
dnsmasq_nanny_image_tag: "{{ kubedns_version }}"
dnsmasq_sidecar_image_repo: "gcr.io/google_containers/k8s-dns-sidecar-{{ image_arch }}"
dnsmasq_sidecar_image_tag: "{{ kubedns_version }}"
dnsmasqautoscaler_version: 1.1.2
dnsmasqautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-{{ image_arch }}"
dnsmasqautoscaler_image_tag: "{{ dnsmasqautoscaler_version }}"
kubednsautoscaler_version: 1.2.0
kubednsautoscaler_image_repo: "gcr.io/google_containers/cluster-proportional-autoscaler-{{ image_arch }}"
kubednsautoscaler_image_tag: "{{ kubednsautoscaler_version }}"
test_image_repo: busybox
test_image_tag: latest
busybox_image_repo: busybox
busybox_image_tag: 1.29.2
helm_version: "v2.9.1"
helm_image_repo: "lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
tiller_image_tag: "{{ helm_version }}"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
registry_image_repo: "registry"
registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.1.0"
cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
cephfs_provisioner_image_tag: "v2.1.0-k8s1.11"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.20.0"
ingress_nginx_default_backend_image_repo: "k8s.gcr.io/defaultbackend-amd64"
ingress_nginx_default_backend_image_tag: "1.5"
cert_manager_version: "v0.5.0"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
downloads:
netcheck_server:
enabled: "{{ deploy_netchecker }}"
container: true
repo: "{{ netcheck_server_image_repo }}"
tag: "{{ netcheck_server_image_tag }}"
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
groups:
- k8s-cluster
netcheck_agent:
enabled: "{{ deploy_netchecker }}"
container: true
repo: "{{ netcheck_agent_image_repo }}"
tag: "{{ netcheck_agent_image_tag }}"
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
groups:
- k8s-cluster
etcd:
container: "{{ etcd_deployment_type != 'host' }}"
file: "{{ etcd_deployment_type == 'host' }}"
enabled: true
version: "{{ etcd_version }}"
    dest: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
repo: "{{ etcd_image_repo }}"
tag: "{{ etcd_image_tag }}"
sha256: "{{ etcd_binary_checksum if etcd_deployment_type == 'host' else etcd_digest_checksum|d(None) }}"
url: "{{ etcd_download_url }}"
unarchive: true
owner: "root"
mode: "0755"
groups:
- etcd
kubeadm:
enabled: "{{ kubeadm_enabled }}"
file: true
version: "{{ kubeadm_version }}"
    dest: "{{ local_release_dir }}/kubeadm"
sha256: "{{ kubeadm_binary_checksum }}"
url: "{{ kubeadm_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
hyperkube:
enabled: "{{ kubeadm_enabled == false }}"
container: true
repo: "{{ hyperkube_image_repo }}"
tag: "{{ hyperkube_image_tag }}"
sha256: "{{ hyperkube_digest_checksum|default(None) }}"
groups:
- k8s-cluster
hyperkube_file:
enabled: true
file: true
version: "{{ kube_version }}"
    dest: "{{ local_release_dir }}/hyperkube"
sha256: "{{ hyperkube_binary_checksum }}"
url: "{{ hyperkube_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
cilium:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_image_repo }}"
tag: "{{ cilium_image_tag }}"
sha256: "{{ cilium_digest_checksum|default(None) }}"
groups:
- k8s-cluster
flannel:
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ flannel_image_repo }}"
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
groups:
- k8s-cluster
flannel_cni:
enabled: "{{ kube_network_plugin == 'flannel' }}"
container: true
repo: "{{ flannel_cni_image_repo }}"
tag: "{{ flannel_cni_image_tag }}"
sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calicoctl:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calicoctl_image_repo }}"
tag: "{{ calicoctl_image_tag }}"
sha256: "{{ calicoctl_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_node:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_node_image_repo }}"
tag: "{{ calico_node_image_tag }}"
sha256: "{{ calico_node_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_cni:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_cni_image_repo }}"
tag: "{{ calico_cni_image_tag }}"
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_policy:
enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_policy_image_repo }}"
tag: "{{ calico_policy_image_tag }}"
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_rr:
enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
container: true
repo: "{{ calico_rr_image_repo }}"
tag: "{{ calico_rr_image_tag }}"
sha256: "{{ calico_rr_digest_checksum|default(None) }}"
groups:
- calico-rr
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_kube_image_repo }}"
tag: "{{ weave_kube_image_tag }}"
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
groups:
- k8s-cluster
weave_npc:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_npc_image_repo }}"
tag: "{{ weave_npc_image_tag }}"
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_image_repo }}"
tag: "{{ contiv_image_tag }}"
sha256: "{{ contiv_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_auth_proxy:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_auth_proxy_image_repo }}"
tag: "{{ contiv_auth_proxy_image_tag }}"
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_etcd_init:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_etcd_init_image_repo }}"
tag: "{{ contiv_etcd_init_image_tag }}"
sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_router:
enabled: "{{ kube_network_plugin == 'kube-router' }}"
container: true
repo: "{{ kube_router_image_repo }}"
tag: "{{ kube_router_image_tag }}"
sha256: "{{ kube_router_digest_checksum|default(None) }}"
groups:
- k8s-cluster
pod_infra:
enabled: true
container: true
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
groups:
- k8s-cluster
install_socat:
enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
container: true
repo: "{{ install_socat_image_repo }}"
tag: "{{ install_socat_image_tag }}"
sha256: "{{ install_socat_digest_checksum|default(None) }}"
groups:
- k8s-cluster
nginx:
enabled: "{{ loadbalancer_apiserver_localhost }}"
container: true
repo: "{{ nginx_image_repo }}"
tag: "{{ nginx_image_tag }}"
sha256: "{{ nginx_digest_checksum|default(None) }}"
groups:
- kube-node
dnsmasq:
enabled: "{{ dns_mode == 'dnsmasq_kubedns' }}"
container: true
repo: "{{ dnsmasq_image_repo }}"
tag: "{{ dnsmasq_image_tag }}"
sha256: "{{ dnsmasq_digest_checksum|default(None) }}"
groups:
- kube-node
kubedns:
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ kubedns_image_repo }}"
tag: "{{ kubedns_image_tag }}"
sha256: "{{ kubedns_digest_checksum|default(None) }}"
groups:
- kube-node
coredns:
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
container: true
repo: "{{ coredns_image_repo }}"
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
groups:
- kube-node
dnsmasq_nanny:
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ dnsmasq_nanny_image_repo }}"
tag: "{{ dnsmasq_nanny_image_tag }}"
sha256: "{{ dnsmasq_nanny_digest_checksum|default(None) }}"
groups:
- kube-node
dnsmasq_sidecar:
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ dnsmasq_sidecar_image_repo }}"
tag: "{{ dnsmasq_sidecar_image_tag }}"
sha256: "{{ dnsmasq_sidecar_digest_checksum|default(None) }}"
groups:
- kube-node
kubednsautoscaler:
enabled: "{{ dns_mode in ['kubedns', 'dnsmasq_kubedns'] }}"
container: true
repo: "{{ kubednsautoscaler_image_repo }}"
tag: "{{ kubednsautoscaler_image_tag }}"
sha256: "{{ kubednsautoscaler_digest_checksum|default(None) }}"
groups:
- kube-node
busybox:
enabled: "{{ kube_network_plugin in ['kube-router'] }}"
container: true
repo: "{{ busybox_image_repo }}"
tag: "{{ busybox_image_tag }}"
sha256: "{{ busybox_digest_checksum|default(None) }}"
groups:
- k8s-cluster
testbox:
enabled: false
container: true
repo: "{{ test_image_repo }}"
tag: "{{ test_image_tag }}"
sha256: "{{ testbox_digest_checksum|default(None) }}"
helm:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ helm_image_repo }}"
tag: "{{ helm_image_tag }}"
sha256: "{{ helm_digest_checksum|default(None) }}"
groups:
- kube-node
tiller:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ tiller_image_repo }}"
tag: "{{ tiller_image_tag }}"
sha256: "{{ tiller_digest_checksum|default(None) }}"
groups:
- kube-node
vault:
enabled: "{{ cert_management == 'vault' }}"
container: "{{ vault_deployment_type != 'host' }}"
file: "{{ vault_deployment_type == 'host' }}"
    dest: "{{ local_release_dir }}/vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
mode: "0755"
owner: "vault"
repo: "{{ vault_image_repo }}"
sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
tag: "{{ vault_image_tag }}"
unarchive: true
url: "{{ vault_download_url }}"
version: "{{ vault_version }}"
groups:
- vault
registry:
enabled: "{{ registry_enabled }}"
container: true
repo: "{{ registry_image_repo }}"
tag: "{{ registry_image_tag }}"
sha256: "{{ registry_digest_checksum|default(None) }}"
groups:
- kube-node
registry_proxy:
enabled: "{{ registry_enabled }}"
container: true
repo: "{{ registry_proxy_image_repo }}"
tag: "{{ registry_proxy_image_tag }}"
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
groups:
- kube-node
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
container: true
repo: "{{ local_volume_provisioner_image_repo }}"
tag: "{{ local_volume_provisioner_image_tag }}"
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
cephfs_provisioner:
enabled: "{{ cephfs_provisioner_enabled }}"
container: true
repo: "{{ cephfs_provisioner_image_repo }}"
tag: "{{ cephfs_provisioner_image_tag }}"
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
ingress_nginx_controller:
enabled: "{{ ingress_nginx_enabled }}"
container: true
repo: "{{ ingress_nginx_controller_image_repo }}"
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
groups:
- kube-node
ingress_nginx_default_backend:
enabled: "{{ ingress_nginx_enabled }}"
container: true
repo: "{{ ingress_nginx_default_backend_image_repo }}"
tag: "{{ ingress_nginx_default_backend_image_tag }}"
sha256: "{{ ingress_nginx_default_backend_digest_checksum|default(None) }}"
groups:
- kube-node
cert_manager_controller:
enabled: "{{ cert_manager_enabled }}"
container: true
repo: "{{ cert_manager_controller_image_repo }}"
tag: "{{ cert_manager_controller_image_tag }}"
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
download_defaults:
container: false
file: false
repo: None
tag: None
enabled: false
dest: None
version: None
url: None
unarchive: false
owner: kube
mode: None
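
# A minimal sketch (an assumption about how a consuming task might look, not
# kubespray's verbatim code) of merging one entry from `downloads` over these
# defaults with Ansible's combine filter:
# - name: Merge download defaults into the etcd item
#   set_fact:
#     download: "{{ download_defaults | combine(downloads['etcd']) }}"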