# c12s-kubespray/roles/download/defaults/main.yml
---
local_release_dir: /tmp/releases
# Used to only evaluate vars from the download role
skip_downloads: false
# If this is set to true, files will only be downloaded once. This doesn't work
# on Container Linux by CoreOS unless download_localhost is true and localhost
# is running another OS type. Default compress level is 1 (fastest).
download_run_once: False
download_compress: 1
# If this is set to true, container images will be downloaded
download_container: True
# If this is set to true, localhost is used for download_run_once mode
# (requires docker and sudo to access docker). You may want this option for
# local caching of docker images or for Container Linux by CoreOS cluster nodes.
# Otherwise, the first node in the kube-master group is used to store images
# in download_run_once mode.
download_localhost: False
# Always pull images if set to True. Otherwise check by the repo's tag/digest.
download_always_pull: False
# Some problems may occur when downloading files over an https proxy due to an ansible bug
# https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
# SSL validation of the get_url module. Note that kubespray will still perform checksum validation.
download_validate_certs: True
# Use the first kube-master if download_localhost is not set
download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
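# For illustration only (a hedged sketch, not part of the defaults): a minimal inventory
# group_vars override enabling the download-once caching behaviour described above.
# The compress level 9 is just an example value, not a recommendation.
#
#   download_run_once: true
#   download_localhost: true
#   download_compress: 9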
# Arch of Docker images and needed packages
image_arch: "{{host_architecture | default('amd64')}}"
# Versions
kube_version: v1.14.1
kubeadm_version: "{{ kube_version }}"
etcd_version: v3.2.26
# kubernetes image repo definition
kube_image_repo: "gcr.io/google-containers"
# TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: "v3.4.0"
calico_ctl_version: "v3.4.4"
calico_cni_version: "v3.4.0"
calico_policy_version: "v3.4.0"
calico_rr_version: "v0.6.1"
calico_typha_version: "v3.4.4"
flannel_version: "v0.11.0"
flannel_cni_version: "v0.3.0"
cni_version: "v0.6.0"
weave_version: 2.5.1
pod_infra_version: 3.1
contiv_version: 1.2.1
cilium_version: "v1.3.0"
kube_router_version: "v0.2.5"
multus_version: "v3.1.autoconf"
# Download URLs
kubeadm_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubeadm_version }}/bin/linux/{{ image_arch }}/kubeadm"
hyperkube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/{{ image_arch }}/hyperkube"
etcd_download_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-{{ image_arch }}.tar.gz"
cni_download_url: "https://github.com/containernetworking/plugins/releases/download/{{ cni_version }}/cni-plugins-{{ image_arch }}-{{ cni_version }}.tgz"
calicoctl_download_url: "https://github.com/projectcalico/calicoctl/releases/download/{{ calico_ctl_version }}/calicoctl-linux-{{ image_arch }}"
# Checksums
hyperkube_checksums:
arm64:
v1.14.1: d5236efc2547fd07c7cc2ed9345dfbcd1204385847ca686cf1c62d15056de399
v1.14.0: 708e00a41f6516d525dee00c91ebe3c3bf2feaf9b7f0af7689487e3e17e356c2
v1.13.5: 8ffd84ba0cb6382a0ff96000458db8a83c92cac09458defe8496f0f0e155a6a8
v1.13.4: b9e909e388634d103fe5376aafa313bed5e69293383b0c740de4fe8e18d42d12
v1.13.3: 588037923b7f4090f5f7a3de23ea49a10345295f0b39bd0c1ebdaa24eaa76731
v1.13.2: 7f2c2b0c6dcc81102a89fa41957db214416fc8a0cfae664fc0e150a7d3ad337b
v1.13.1: 66205d99ec93090c6d814ab1de7c38cd84257d3dcf3a957618fad5878caea13d
v1.13.0: 4391ea0d8d472c1737f1ce945756bf2a11395c708824c780d1a44fbddf031e59
v1.12.6: 29036599f173ceeab2c72dff589aa02d5a55b45143c70de7c08cdea75a282bc2
v1.12.5: 0b953f2d564d2f9298f3fc3ad6279cf4b18c1c967ebd2d542b79dda54e3aa27a
v1.12.4: f66fc2c945c757d6b34cdd654c3d951f74f366026f9af0dd10eb64e482584811
v1.12.3: a6142356fdbc8915cb474437355f809d987f6b983e21081dc3e18ea86c76bf85
v1.12.2: 81e6fdd4e3ed8687a37d9b1830aa43d508fb7d5061c81cb2576a17077382b614
v1.12.1: 6863440b5516c94f48a3a23bf325a007af09f5412f335444e204bc4b09fbad2a
v1.12.0: 3eb06e2344ea5e4988fdda168627319e7b10665f7f9fc9d96c477ccb39d0b061
amd64:
v1.14.1: fb34b98da9325feca8daa09bb934dbe6a533aad69c2a5599bbed81b99bb9c267
v1.14.0: af8b04504365dbe4ce6a1772f42eb390d4221a21149b522fc8a0c4b1cd3d97aa
v1.13.5: 1a8a357ebfeab8ec62d0c6f11b59df1a93d6711c3a16e1501da32b55c144c73a
v1.13.4: 6f2d755a350efec8b3b29e0ddf8362f60475cc10d42dea37f8f2159f7776867b
v1.13.3: b238c772b5e4b9deed0cdc695fe86324660d037b38c6d6d7eeae7d7a657840c7
v1.13.2: f159b587ec80ad03bf3b9bb09de5d64b773d01b0e34f2a4f1c816879c56aae6d
v1.13.1: f64c4328d3853f3e5680e7d296b0f3ed25e67ff98321867309edea100ebb4fd7
v1.13.0: 754f1baae5dc2ba29afc66e1f5d3b676ee59cd5c40ccce813092408d53bde3d9
v1.12.6: eb7bd0c21977bca7071c65fa0ef60d5e09c9e9a16c4fd8435be5bd7f5b0d1221
v1.12.5: f8b651816b2caa33e8b25a666e5c370e9786356d59f89579bba772f28370ed00
v1.12.4: a4697d8f3791f0408fcdb97b3de187e47d7b39a63332c75f68f95e25f4891cc9
v1.12.3: 600aad3f0d016716abd85931239806193ffbe95f2edfdcea11532d518ae5cdb1
v1.12.2: 566dfed398c20c9944f8999d6370cb584cb8c228b3c5881137b6b3d9306e4b06
v1.12.1: 4aa23cfb2fc2e2e4d0cbe0d83a648c38e4baabd6c66f5cdbbb40cbc7582fdc74
v1.12.0: f80336201f3152a5307c01f8a7206847398dde15c69b3d20c76a7d9520b60daf
kubeadm_checksums:
arm64:
v1.14.1: 5cf05464168e45ee4719264a267c65f9319fae1ceb9923fedab97a9d6a629e0b
v1.14.0: 7ed9d706e50cd6d3fc618a7af3d19b691b8a5343ddedaeccb4ea09af3ecfae2c
v1.13.5: 59a1995c171e5c1e74f5d02657eb2c155706f2d159ec1847b64dc866228c40d2
v1.13.4: 4de71d4cfa4dc64127148d48f3a1a1fa7ea24cf0c4fa42957459d0e7f9c03799
v1.13.3: bef1cbc2d199d32a1a31e70b864dc539b24e3c1cb87b50a1295cf03bec4832b0
v1.13.2: 08279a3bfeff8c4f6768d6fd92ceff8276a555f9e81bf9d541112fc8eb29963e
v1.13.1: 0f5c2c8a1ffe235785c0a38c9a6530d3d9e67b00e9a07c9d5dca4c36ede2e078
v1.13.0: efc2669952b05161e181f0805bb0647308891259528a4868e69f4b1b68c70489
v1.12.6: 2552b6b623c0c390d495e1fcfbecbebb2ca8853bce4011ce9b9dc3f1763a9b2b
v1.12.5: f8b212c4a63d28e800a312c3785a62650c3c5bb26326e414d59e1a548d68fbae
v1.12.4: 929ec24bee4d7645a18b157d6987554a131fde8d1efb704391bdfe81e6dcf1d4
v1.12.3: e9e54a553447391de59a21d3da5b58d61d3431877de194434b4ae6544594009e
v1.12.2: 2230dfabd76a4d0888facccb3ec3c802b658e835aaa817cbce2310d3f8533fc4
v1.12.1: 226b9026ef913e98c2966503fde6973e3e33b5621e9c240667093dcb786bd811
v1.12.0: c0d4a75615791e6880d051d6d601eb703e0ac3ec64f94f156b76351368b2eb9c
amd64:
v1.14.1: c4fc478572b5623857f5d820e1c107ae02049ca02cf2993e512a091a0196957b
v1.14.0: 03678f49ee4737f8b8c4f59ace0d140a36ffbc4f6035c59561f59f45b57d0c93
v1.13.5: 274bf887039a9993e30f96047a4a474c39e8471c4094acb75aea6beed793f079
v1.13.4: c4300d1f3ebccad48c8e267e45a736c7d227b0e45ef36582fa8dcfe2ef7b1b10
v1.13.3: ab767ea53e45aceba628977ef6c8c62eace72d6d232efeaf35ac50cbea5f3739
v1.13.2: 7cb0ce57c1e6e2d85e05de3780a2f35a191fe93f89cfc5816b424efcf39834b9
v1.13.1: 438173bfa0b7014ecae994c5b9e1f27e1328ab971a3fdb06a393a8095a176ba0
v1.13.0: f5366206416dc4cfc840a7add2289957b56ccc479cc1b74f7397a4df995d6b06
v1.12.6: 9048031930be9cb0506940c04f6ce67408d9caa9384b32d65d7aa5b6f1ad58ec
v1.12.5: d61730b3deb4d9825af0cc1e452a4be2292400507128279770c39669f6599af9
v1.12.4: 674ad5892ff2403f492c9042c3cea3fa0bfa3acf95bc7d1777c3645f0ddf64d7
v1.12.3: c675aa3be82754b3f8dfdde2a1526a72986713312d46d898e65cb564c6aa8ad4
v1.12.2: 51bc4bfd1d934a27245111c0ad1f793d5147ed15389415a1509502f23fcfa642
v1.12.1: 5d95efd65aad398d85a9802799f36410ae7a95f9cbe73c8b10d2213c10a6d7be
v1.12.0: 463fb058b7fa2591fb01f29f2451b054f6cbaa0f8a20394b4a4eb5d68473176f
etcd_binary_checksums:
arm64: c219b254ece7d7e308ae41569fa240dbae2de460bed818ee39b408b73f6360ef
amd64: 127d4f2097c09d929beb9d3784590cc11102f4b4d4d4da7ad82d5c9e856afd38
cni_binary_checksums:
arm64: 016bbc989877e35e3cd49fafe11415fb2717e52c74fde6b1650411154cb91b81
amd64: f04339a21b8edf76d415e7f17b620e63b8f37a76b2f706671587ab6464411f2d
calicoctl_binary_checksums:
amd64:
v3.6.1: 3b01336de37550e020343d62a38c96c4605d33a3ed7ddba2fe38bc172a5b42b5
v3.5.4: 197194b838cc2a9a7455c2ebd5505a5e24f8f3d994eb75c17f5dd568944100b8
v3.4.4: 93bd084e053cf1bf3b7fef369677bd6767c30fe7135e2c7e044e31693422ef61
arm64:
v3.6.1: 60fbaeb257061647bdf12b5ede7a0d4298a5ee216f6472e5a92bb14ef5c2a5d3
v3.5.4: a4481178665658658a73e4ceca9a1dff5cccded4179615c91d1c3e49fd96f237
v3.4.4: ff35d9e8b5c00e9fe47d05e8f5123ec98fd641370f8cd93f4fbb3d913da77ab6
etcd_binary_checksum: "{{ etcd_binary_checksums[image_arch] }}"
cni_binary_checksum: "{{ cni_binary_checksums[image_arch] }}"
hyperkube_binary_checksum: "{{ hyperkube_checksums[image_arch][kube_version] }}"
kubeadm_binary_checksum: "{{ kubeadm_checksums[image_arch][kubeadm_version] }}"
calicoctl_binary_checksum: "{{ calicoctl_binary_checksums[image_arch][calico_ctl_version] }}"
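# Note: the lookups above resolve per kube_version / kubeadm_version / calico_ctl_version,
# so overriding one of those versions (e.g. in group_vars) also requires a matching
# checksum entry for that version and architecture, otherwise the lookup fails.
# For illustration only, re-stating the existing amd64 v1.14.1 hyperkube entry shows the
# expected structure:
#
#   hyperkube_checksums:
#     amd64:
#       v1.14.1: fb34b98da9325feca8daa09bb934dbe6a533aad69c2a5599bbed81b99bb9c267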
# Containers
# In some cases we need a way to set --registry-mirror or --insecure-registry for docker;
# this helps a lot for local private development or bare metal environments.
# To do so, define --registry-mirror or --insecure-registry for docker and modify the
# following image repo addresses to point at your own private registry.
# Example: when deploying a kubernetes cluster for local private development, provide the
# address of your own private registry and use the --insecure-registry option for docker.
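# For illustration only (a hedged sketch, not defaults): pointing some of the image repo
# variables in this file at a private registry via group_vars. The registry host
# "registry.example.local:5000" is a placeholder.
#
#   kube_image_repo: "registry.example.local:5000/google-containers"
#   etcd_image_repo: "registry.example.local:5000/coreos/etcd"
#   calico_node_image_repo: "registry.example.local:5000/calico/node"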
etcd_image_repo: "quay.io/coreos/etcd"
etcd_image_tag: "{{ etcd_version }}{%- if image_arch != 'amd64' -%}-{{ image_arch }}{%- endif -%}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
flannel_cni_image_tag: "{{ flannel_cni_version }}"
calico_node_image_repo: "docker.io/calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "docker.io/calico/cni"
calico_cni_image_tag: "{{ calico_cni_version }}"
calico_policy_image_repo: "docker.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "docker.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
calico_typha_image_repo: "docker.io/calico/typha"
calico_typha_image_tag: "{{ calico_typha_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "docker.io/xueshanf/install-socat"
install_socat_image_tag: "latest"
netcheck_version: "v1.0"
netcheck_agent_image_repo: "quay.io/l23network/k8s-netchecker-agent"
netcheck_agent_image_tag: "{{ netcheck_version }}"
netcheck_server_image_repo: "quay.io/l23network/k8s-netchecker-server"
netcheck_server_image_tag: "{{ netcheck_version }}"
weave_kube_image_repo: "docker.io/weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
weave_npc_image_tag: "{{ weave_version }}"
contiv_image_repo: "docker.io/contiv/netplugin"
contiv_image_tag: "{{ contiv_version }}"
contiv_init_image_repo: "docker.io/contiv/netplugin-init"
contiv_init_image_tag: "latest"
contiv_auth_proxy_image_repo: "docker.io/contiv/auth_proxy"
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
contiv_etcd_init_image_repo: "docker.io/ferest/etcd-initer"
contiv_etcd_init_image_tag: latest
contiv_ovs_image_repo: "docker.io/contiv/ovs"
contiv_ovs_image_tag: "latest"
cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
cilium_init_image_repo: "docker.io/library/busybox"
cilium_init_image_tag: "1.28.4"
kube_router_image_repo: "docker.io/cloudnativelabs/kube-router"
kube_router_image_tag: "{{ kube_router_version }}"
multus_image_repo: "docker.io/nfvpe/multus"
multus_image_tag: "{{ multus_version }}"
nginx_image_repo: docker.io/nginx
nginx_image_tag: 1.15
haproxy_image_repo: docker.io/haproxy
haproxy_image_tag: 1.9
coredns_version: "1.5.0"
coredns_image_repo: "docker.io/coredns/coredns"
coredns_image_tag: "{{ coredns_version }}"
nodelocaldns_version: "1.15.1"
nodelocaldns_image_repo: "k8s.gcr.io/k8s-dns-node-cache"
nodelocaldns_image_tag: "{{ nodelocaldns_version }}"
dnsautoscaler_version: 1.4.0
dnsautoscaler_image_repo: "k8s.gcr.io/cluster-proportional-autoscaler-{{ image_arch }}"
dnsautoscaler_image_tag: "{{ dnsautoscaler_version }}"
test_image_repo: docker.io/busybox
test_image_tag: latest
busybox_image_repo: docker.io/busybox
busybox_image_tag: 1.29.2
helm_version: "v2.13.1"
helm_image_repo: "docker.io/lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
tiller_image_tag: "{{ helm_version }}"
registry_image_repo: "docker.io/registry"
registry_image_tag: "2.6"
registry_proxy_image_repo: "gcr.io/google_containers/kube-registry-proxy"
registry_proxy_image_tag: "0.4"
metrics_server_version: "v0.3.2"
metrics_server_image_repo: "gcr.io/google_containers/metrics-server-amd64"
metrics_server_image_tag: "{{ metrics_server_version }}"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.1.0"
cephfs_provisioner_image_repo: "quay.io/external_storage/cephfs-provisioner"
cephfs_provisioner_image_tag: "v2.1.0-k8s1.11"
rbd_provisioner_image_repo: "quay.io/external_storage/rbd-provisioner"
rbd_provisioner_image_tag: "v2.1.1-k8s1.11"
local_path_provisioner_image_repo: "docker.io/rancher/local-path-provisioner"
local_path_provisioner_image_tag: "v0.0.2"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.21.0"
cert_manager_version: "v0.5.2"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
addon_resizer_version: "1.8.3"
addon_resizer_image_repo: "k8s.gcr.io/addon-resizer"
addon_resizer_image_tag: "{{ addon_resizer_version }}"
dashboard_image_repo: "gcr.io/google_containers/kubernetes-dashboard-{{ image_arch }}"
dashboard_image_tag: "v1.10.1"
downloads:
netcheck_server:
enabled: "{{ deploy_netchecker }}"
container: true
repo: "{{ netcheck_server_image_repo }}"
tag: "{{ netcheck_server_image_tag }}"
sha256: "{{ netcheck_server_digest_checksum|default(None) }}"
groups:
- k8s-cluster
netcheck_agent:
enabled: "{{ deploy_netchecker }}"
container: true
repo: "{{ netcheck_agent_image_repo }}"
tag: "{{ netcheck_agent_image_tag }}"
sha256: "{{ netcheck_agent_digest_checksum|default(None) }}"
groups:
- k8s-cluster
etcd:
container: "{{ etcd_deployment_type != 'host' }}"
file: "{{ etcd_deployment_type == 'host' }}"
enabled: true
version: "{{ etcd_version }}"
dest: "{{local_release_dir}}/etcd-{{ etcd_version }}-linux-amd64.tar.gz"
repo: "{{ etcd_image_repo }}"
tag: "{{ etcd_image_tag }}"
sha256: "{{ etcd_binary_checksum if etcd_deployment_type == 'host' else etcd_digest_checksum|d(None) }}"
url: "{{ etcd_download_url }}"
unarchive: true
owner: "root"
mode: "0755"
groups:
- etcd
cni:
enabled: true
file: true
version: "{{ cni_version }}"
dest: "{{local_release_dir}}/cni-plugins-{{ image_arch }}-{{ cni_version }}.tgz"
sha256: "{{ cni_binary_checksum }}"
url: "{{ cni_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
kubeadm:
enabled: true
file: true
version: "{{ kubeadm_version }}"
dest: "{{ local_release_dir }}/kubeadm-{{ kubeadm_version }}-{{ image_arch }}"
sha256: "{{ kubeadm_binary_checksum }}"
url: "{{ kubeadm_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
hyperkube_file:
enabled: true
file: true
version: "{{ kube_version }}"
dest: "{{ local_release_dir }}/hyperkube-{{ kube_version }}-{{ image_arch }}"
sha256: "{{ hyperkube_binary_checksum }}"
url: "{{ hyperkube_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
cilium:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_image_repo }}"
tag: "{{ cilium_image_tag }}"
sha256: "{{ cilium_digest_checksum|default(None) }}"
groups:
- k8s-cluster
cilium_init:
enabled: "{{ kube_network_plugin == 'cilium' }}"
container: true
repo: "{{ cilium_init_image_repo }}"
tag: "{{ cilium_init_image_tag }}"
sha256: "{{ cilium_init_digest_checksum|default(None) }}"
groups:
- k8s-cluster
multus:
enabled: "{{ kube_network_plugin_multus }}"
container: true
repo: "{{ multus_image_repo }}"
tag: "{{ multus_image_tag }}"
sha256: "{{ multus_digest_checksum|default(None) }}"
groups:
- k8s-cluster
flannel:
enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ flannel_image_repo }}"
tag: "{{ flannel_image_tag }}"
sha256: "{{ flannel_digest_checksum|default(None) }}"
groups:
- k8s-cluster
flannel_cni:
enabled: "{{ kube_network_plugin == 'flannel' }}"
container: true
repo: "{{ flannel_cni_image_repo }}"
tag: "{{ flannel_cni_image_tag }}"
sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calicoctl:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
file: true
version: "{{ calico_ctl_version }}"
dest: "{{local_release_dir}}/calicoctl"
sha256: "{{ calicoctl_binary_checksum }}"
url: "{{ calicoctl_download_url }}"
unarchive: false
owner: "root"
mode: "0755"
groups:
- k8s-cluster
calico_node:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_node_image_repo }}"
tag: "{{ calico_node_image_tag }}"
sha256: "{{ calico_node_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_cni:
enabled: "{{ kube_network_plugin == 'calico' or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_cni_image_repo }}"
tag: "{{ calico_cni_image_tag }}"
sha256: "{{ calico_cni_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_policy:
enabled: "{{ enable_network_policy or kube_network_plugin == 'canal' }}"
container: true
repo: "{{ calico_policy_image_repo }}"
tag: "{{ calico_policy_image_tag }}"
sha256: "{{ calico_policy_digest_checksum|default(None) }}"
groups:
- k8s-cluster
calico_rr:
enabled: "{{ peer_with_calico_rr is defined and peer_with_calico_rr and kube_network_plugin == 'calico' }}"
container: true
repo: "{{ calico_rr_image_repo }}"
tag: "{{ calico_rr_image_tag }}"
sha256: "{{ calico_rr_digest_checksum|default(None) }}"
groups:
- calico-rr
calico_typha:
    enabled: "{{ typha_enabled }}"
container: true
repo: "{{ calico_typha_image_repo }}"
tag: "{{ calico_typha_image_tag }}"
sha256: "{{ calico_typha_digest_checksum|default(None) }}"
groups:
- k8s-cluster
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_kube_image_repo }}"
tag: "{{ weave_kube_image_tag }}"
sha256: "{{ weave_kube_digest_checksum|default(None) }}"
groups:
- k8s-cluster
weave_npc:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
repo: "{{ weave_npc_image_repo }}"
tag: "{{ weave_npc_image_tag }}"
sha256: "{{ weave_npc_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_image_repo }}"
tag: "{{ contiv_image_tag }}"
sha256: "{{ contiv_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_auth_proxy:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_auth_proxy_image_repo }}"
tag: "{{ contiv_auth_proxy_image_tag }}"
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_etcd_init:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_etcd_init_image_repo }}"
tag: "{{ contiv_etcd_init_image_tag }}"
sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_router:
enabled: "{{ kube_network_plugin == 'kube-router' }}"
container: true
repo: "{{ kube_router_image_repo }}"
tag: "{{ kube_router_image_tag }}"
sha256: "{{ kube_router_digest_checksum|default(None) }}"
groups:
- k8s-cluster
pod_infra:
enabled: true
container: true
repo: "{{ pod_infra_image_repo }}"
tag: "{{ pod_infra_image_tag }}"
sha256: "{{ pod_infra_digest_checksum|default(None) }}"
groups:
- k8s-cluster
install_socat:
enabled: "{{ ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] }}"
container: true
repo: "{{ install_socat_image_repo }}"
tag: "{{ install_socat_image_tag }}"
sha256: "{{ install_socat_digest_checksum|default(None) }}"
groups:
- k8s-cluster
nginx:
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'nginx' }}"
container: true
repo: "{{ nginx_image_repo }}"
tag: "{{ nginx_image_tag }}"
sha256: "{{ nginx_digest_checksum|default(None) }}"
groups:
- kube-node
haproxy:
enabled: "{{ loadbalancer_apiserver_localhost and loadbalancer_apiserver_type == 'haproxy' }}"
container: true
repo: "{{ haproxy_image_repo }}"
tag: "{{ haproxy_image_tag }}"
sha256: "{{ haproxy_digest_checksum|default(None) }}"
groups:
- kube-node
coredns:
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
container: true
repo: "{{ coredns_image_repo }}"
tag: "{{ coredns_image_tag }}"
sha256: "{{ coredns_digest_checksum|default(None) }}"
groups:
- kube-master
nodelocaldns:
enabled: "{{ enable_nodelocaldns }}"
container: true
repo: "{{ nodelocaldns_image_repo }}"
tag: "{{ nodelocaldns_image_tag }}"
sha256: "{{ nodelocaldns_digest_checksum|default(None) }}"
groups:
- k8s-cluster
dnsautoscaler:
enabled: "{{ dns_mode in ['coredns', 'coredns_dual'] }}"
container: true
repo: "{{ dnsautoscaler_image_repo }}"
tag: "{{ dnsautoscaler_image_tag }}"
sha256: "{{ dnsautoscaler_digest_checksum|default(None) }}"
groups:
- kube-master
busybox:
enabled: "{{ kube_network_plugin in ['kube-router'] }}"
container: true
repo: "{{ busybox_image_repo }}"
tag: "{{ busybox_image_tag }}"
sha256: "{{ busybox_digest_checksum|default(None) }}"
groups:
- k8s-cluster
testbox:
enabled: false
container: true
repo: "{{ test_image_repo }}"
tag: "{{ test_image_tag }}"
sha256: "{{ testbox_digest_checksum|default(None) }}"
helm:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ helm_image_repo }}"
tag: "{{ helm_image_tag }}"
sha256: "{{ helm_digest_checksum|default(None) }}"
groups:
- kube-node
tiller:
enabled: "{{ helm_enabled }}"
container: true
repo: "{{ tiller_image_repo }}"
tag: "{{ tiller_image_tag }}"
sha256: "{{ tiller_digest_checksum|default(None) }}"
groups:
- kube-node
registry:
enabled: "{{ registry_enabled }}"
container: true
repo: "{{ registry_image_repo }}"
tag: "{{ registry_image_tag }}"
sha256: "{{ registry_digest_checksum|default(None) }}"
groups:
- kube-node
registry_proxy:
enabled: "{{ registry_enabled }}"
container: true
repo: "{{ registry_proxy_image_repo }}"
tag: "{{ registry_proxy_image_tag }}"
sha256: "{{ registry_proxy_digest_checksum|default(None) }}"
groups:
- kube-node
metrics_server:
enabled: "{{ metrics_server_enabled }}"
container: true
repo: "{{ metrics_server_image_repo }}"
tag: "{{ metrics_server_image_tag }}"
sha256: "{{ metrics_server_digest_checksum|default(None) }}"
groups:
- kube-master
addon_resizer:
# Currently addon_resizer is only used by metrics server
enabled: "{{ metrics_server_enabled }}"
container: true
repo: "{{ addon_resizer_image_repo }}"
tag: "{{ addon_resizer_image_tag }}"
sha256: "{{ addon_resizer_digest_checksum|default(None) }}"
groups:
- kube-master
local_volume_provisioner:
enabled: "{{ local_volume_provisioner_enabled }}"
container: true
repo: "{{ local_volume_provisioner_image_repo }}"
tag: "{{ local_volume_provisioner_image_tag }}"
sha256: "{{ local_volume_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
cephfs_provisioner:
enabled: "{{ cephfs_provisioner_enabled }}"
container: true
repo: "{{ cephfs_provisioner_image_repo }}"
tag: "{{ cephfs_provisioner_image_tag }}"
sha256: "{{ cephfs_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
rbd_provisioner:
enabled: "{{ rbd_provisioner_enabled }}"
container: true
repo: "{{ rbd_provisioner_image_repo }}"
tag: "{{ rbd_provisioner_image_tag }}"
sha256: "{{ rbd_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
local_path_provisioner:
    enabled: "{{ local_path_provisioner_enabled }}"
container: true
repo: "{{ local_path_provisioner_image_repo }}"
tag: "{{ local_path_provisioner_image_tag }}"
sha256: "{{ local_path_provisioner_digest_checksum|default(None) }}"
groups:
- kube-node
ingress_nginx_controller:
enabled: "{{ ingress_nginx_enabled }}"
container: true
repo: "{{ ingress_nginx_controller_image_repo }}"
tag: "{{ ingress_nginx_controller_image_tag }}"
sha256: "{{ ingress_nginx_controller_digest_checksum|default(None) }}"
groups:
- kube-node
cert_manager_controller:
enabled: "{{ cert_manager_enabled }}"
container: true
repo: "{{ cert_manager_controller_image_repo }}"
tag: "{{ cert_manager_controller_image_tag }}"
sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
groups:
- kube-node
dashboard:
enabled: "{{ dashboard_enabled }}"
container: true
repo: "{{ dashboard_image_repo }}"
tag: "{{ dashboard_image_tag }}"
sha256: "{{ dashboard_digest_checksum|default(None) }}"
groups:
- kube-master
download_defaults:
container: false
file: false
repo: None
tag: None
enabled: false
dest: None
version: None
url: None
unarchive: false
owner: kube
mode: None
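# For illustration only: every item under "downloads" is a dict built from the keys listed
# in download_defaults above. A hypothetical file-type entry (names and URL are
# placeholders, not real kubespray variables) would look like:
#
#   mytool:
#     enabled: true
#     file: true
#     version: "v1.0.0"
#     dest: "{{ local_release_dir }}/mytool-v1.0.0-{{ image_arch }}"
#     sha256: "{{ mytool_binary_checksum }}"
#     url: "{{ mytool_download_url }}"
#     unarchive: false
#     owner: "root"
#     mode: "0755"
#     groups:
#       - k8s-cluster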