Add support for Calico Kubernetes datastore and Typha. (#4498)

* Add support for Calico Kubernetes datastore and Typha.

* Add typha_enabled to kubespray-defaults.
grialeyur 2019-04-25 16:00:48 +04:00 committed by Kubernetes Prow Robot
parent 6ca2019002
commit 82119ca923
12 changed files with 453 additions and 19 deletions


@@ -24,3 +24,12 @@
# Advertise Cluster IPs
# calico_advertise_cluster_ips: true
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
# calico_datastore: "etcd"
# Use typha (only with kdd)
# typha_enabled: false
# Number of typha replicas
# typha_replicas: 1
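
To exercise the new options, a minimal inventory override using only the variables documented above would be:

calico_datastore: "kdd"
typha_enabled: true
typha_replicas: 3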


@@ -49,6 +49,7 @@ calico_ctl_version: "v3.4.4"
calico_cni_version: "v3.4.0"
calico_policy_version: "v3.4.0"
calico_rr_version: "v0.6.1"
calico_typha_version: "v3.4.4"
flannel_version: "v0.11.0"
flannel_cni_version: "v0.3.0"
@@ -181,6 +182,8 @@ calico_policy_image_repo: "docker.io/calico/kube-controllers"
calico_policy_image_tag: "{{ calico_policy_version }}"
calico_rr_image_repo: "docker.io/calico/routereflector"
calico_rr_image_tag: "{{ calico_rr_version }}"
calico_typha_image_repo: "docker.io/calico/typha"
calico_typha_image_tag: "{{ calico_typha_version }}"
pod_infra_image_repo: "gcr.io/google_containers/pause-{{ image_arch }}"
pod_infra_image_tag: "{{ pod_infra_version }}"
install_socat_image_repo: "docker.io/xueshanf/install-socat"
@@ -434,6 +437,15 @@ downloads:
groups:
- calico-rr
calico_typha:
enabled: "{{ typha_enabled == 'calico' }}"
container: true
repo: "{{ calico_typha_image_repo }}"
tag: "{{ calico_typha_image_tag }}"
sha256: "{{ calico_typha_digest_checksum|default(None) }}"
groups:
- k8s-cluster
weave_kube:
enabled: "{{ kube_network_plugin == 'weave' }}"
container: true
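
The new calico_typha download entry follows the same shape as the other image entries, so it can be overridden per cluster. An illustrative override (the registry host and digest value are hypothetical) would be:

calico_typha_image_repo: "registry.example.local/calico/typha"
calico_typha_image_tag: "{{ calico_typha_version }}"
# Optional: pin the image by digest; consumed by the entry's sha256 field above.
calico_typha_digest_checksum: "<sha256-of-the-typha-image>"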


@@ -9,6 +9,8 @@
state: "latest"
with_items:
- "{{ calico_node_manifests.results }}"
- "{{ calico_node_kdd_manifest.results }}"
- "{{ calico_node_typha_manifest.results }}"
when:
- inventory_hostname == groups['kube-master'][0] and not item is skipped
loop_control:
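
For context, the hunk above only extends the with_items list of an existing task; the surrounding task in the calico role looks roughly like the sketch below (the task name and the kube-module fields outside the hunk are assumptions, not part of this change):

- name: "Calico | Start Resources"
  kube:
    name: "{{ item.item.name }}"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
    state: "latest"
  with_items:
    - "{{ calico_node_manifests.results }}"
    - "{{ calico_node_kdd_manifest.results }}"
    - "{{ calico_node_typha_manifest.results }}"
  when:
    - inventory_hostname == groups['kube-master'][0] and not item is skipped
  loop_control:
    label: "{{ item.item.file }}"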


@@ -501,3 +501,5 @@ pip_extra_args: |-
etcd_config_dir: /etc/ssl/etcd
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
typha_enabled: false


@@ -65,3 +65,10 @@ calico_upgrade_needed: false
kube_etcd_cacert_file: ca.pem
kube_etcd_cert_file: node-{{ inventory_hostname }}.pem
kube_etcd_key_file: node-{{ inventory_hostname }}-key.pem
# Choose data store type for calico: "etcd" or "kdd" (kubernetes datastore)
calico_datastore: "etcd"
# Use typha (only with kdd)
typha_enabled: false
# Number of typha replicas
typha_replicas: 1
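
The comments note that Typha only works with the kdd datastore. A pre-flight assertion of that constraint is not part of this change, but a hypothetical check task would look like:

- name: Calico | Check that typha is only enabled with the kdd datastore
  assert:
    that:
      - not typha_enabled or calico_datastore == "kdd"
    msg: "typha_enabled: true requires calico_datastore: kdd"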


@@ -350,3 +350,25 @@
when:
- inventory_hostname in groups['kube-master']
- rbac_enabled or item.type not in rbac_resources
- name: Calico | Create calico manifests for kdd
template:
src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: calico, file: kdd-crds.yml, type: kdd}
register: calico_node_kdd_manifest
when:
- inventory_hostname in groups['kube-master']
- calico_datastore == "kdd"
- name: Calico | Create calico manifests for typha
template:
src: "{{item.file}}.j2"
dest: "{{kube_config_dir}}/{{item.file}}"
with_items:
- {name: calico, file: calico-typha.yml, type: typha}
register: calico_node_typha_manifest
when:
- inventory_hostname in groups['kube-master']
- typha_enabled and calico_datastore == "kdd"


@@ -4,10 +4,17 @@ metadata:
name: calico-config
namespace: kube-system
data:
{% if calico_datastore == "etcd" %}
etcd_endpoints: "{{ etcd_access_addresses }}"
etcd_ca: "/calico-secrets/ca_cert.crt"
etcd_cert: "/calico-secrets/cert.crt"
etcd_key: "/calico-secrets/key.pem"
{% elif calico_datastore == "kdd" and typha_enabled == "true" %}
# To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas
# below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is
# essential.
typha_service_name: "calico-typha"
{% endif %}
{% if calico_network_backend is defined and calico_network_backend == 'none' %}
cluster_type: "kubespray"
calico_backend: "none"


@@ -19,11 +19,16 @@ rules:
verbs:
- watch
- list
{% if calico_datastore == "kdd" %}
# Used to discover Typhas.
- get
{% endif %}
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
{% if calico_datastore == "etcd" %}
- apiGroups:
- policy
resourceNames:
@@ -32,3 +37,73 @@ rules:
- podsecuritypolicies
verbs:
- use
{% elif calico_datastore == "kdd" %}
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
{% endif %}
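
Because the "- update" verb at the top of the kdd branch continues the verbs list of the nodes/status rule that precedes the {% if %}, the rendered rule with calico_datastore set to "kdd" becomes:

- apiGroups: [""]
  resources:
    - nodes/status
  verbs:
    - patch
    # Calico stores some configuration information in node annotations.
    - update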


@@ -61,6 +61,13 @@ spec:
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
{% if calico_datastore == "kdd" %}
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{% endif %}
volumeMounts:
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
@@ -98,11 +105,51 @@ spec:
image: {{ calico_node_image_repo }}:{{ calico_node_image_tag }}
env:
{% if calico_datastore == "etcd" %}
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
{% elif calico_datastore == "kdd" %}
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
{% if typha_enabled %}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: calico-config
key: typha_service_name
{% endif %}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
{% endif %}
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
@@ -167,24 +214,6 @@ spec:
- name: CALICO_ADVERTISE_CLUSTER_IPS
value: "{{ kube_service_addresses }}"
{% endif %}
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_ca
# Location of the client key for etcd.
- name: ETCD_KEY_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_key
# Location of the client certificate for etcd.
- name: ETCD_CERT_FILE
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_cert
{% if calico_ip_auto_method is defined %}
- name: IP_AUTODETECTION_METHOD
value: "{{ calico_ip_auto_method }}"
@@ -243,8 +272,10 @@ spec:
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
{% if calico_datastore == "etcd" %}
- mountPath: /calico-secrets
name: etcd-certs
{% endif %}
- name: xtables-lock
mountPath: /run/xtables.lock
readOnly: false
@@ -266,10 +297,12 @@ spec:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
{% if calico_datastore == "etcd" %}
# Mount in the etcd TLS secrets.
- name: etcd-certs
hostPath:
path: "{{ calico_cert_dir }}"
{% endif %}
# Mount the global iptables lock file, used by calico/node
- name: xtables-lock
hostPath:
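
Taken together, the datastore-specific env block on the calico-node container renders as follows when calico_datastore is "kdd" and Typha is enabled (illustrative; the values come directly from the template above):

- name: DATASTORE_TYPE
  value: "kubernetes"
- name: FELIX_TYPHAK8SSERVICENAME
  valueFrom:
    configMapKeyRef:
      name: calico-config
      key: typha_service_name
- name: WAIT_FOR_DATASTORE
  value: "true"
- name: NODENAME
  valueFrom:
    fieldRef:
      fieldPath: spec.nodeName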


@@ -0,0 +1,123 @@
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the calico-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ typha_replicas }}
revisionHistoryLimit: 2
template:
metadata:
labels:
k8s-app: calico-typha
annotations:
# This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
# add-on, ensuring it gets priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: calico-node
containers:
# - image: calico/typha:v3.4.4
- image: {{ calico_typha_image_repo }}:{{ calico_typha_image_tag }}
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
# Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
# this opens a port on the host, which may need to be secured.
#- name: TYPHA_PROMETHEUSMETRICSENABLED
# value: "true"
#- name: TYPHA_PROMETHEUSMETRICSPORT
# value: "9093"
livenessProbe:
exec:
command:
- calico-typha
- check
- liveness
periodSeconds: 30
initialDelaySeconds: 30
readinessProbe:
exec:
command:
- calico-typha
- check
- readiness
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha to allow the K8s Cluster Autoscaler to evict Typha pods safely.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
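
Following the sizing guidance in the comments above (one replica per 100-200 nodes, at least three in production), a cluster of a few hundred nodes would typically set:

typha_replicas: 3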


@@ -9,16 +9,25 @@
"nodename": "{{ calico_baremetal_nodename }}",
{% endif %}
"type": "calico",
"log_level": "info",
{% if calico_datastore == "etcd" %}
"etcd_endpoints": "{{ etcd_access_addresses }}",
"etcd_cert_file": "{{ calico_cert_dir }}/cert.crt",
"etcd_key_file": "{{ calico_cert_dir }}/key.pem",
"etcd_ca_cert_file": "{{ calico_cert_dir }}/ca_cert.crt",
"log_level": "info",
"ipam": {
"type": "calico-ipam",
"assign_ipv4": "true",
"ipv4_pools": ["{{ calico_pool_cidr | default(kube_pods_subnet) }}"]
},
{% elif calico_datastore == "kdd" %}
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
{% endif %}
{% if enable_network_policy %}
"policy": {
"type": "k8s"


@@ -0,0 +1,133 @@
# Create all the CustomResourceDefinitions needed for Calico to use the Kubernetes datastore.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
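
Once these CRDs are applied, Calico keeps its configuration in custom resources instead of etcd. For example, the default IP pool created on startup would look roughly like this (illustrative values; the CIDR matches kubespray's default kube_pods_subnet):

apiVersion: crd.projectcalico.org/v1
kind: IPPool
metadata:
  name: default-pool
spec:
  cidr: 10.233.64.0/18
  ipipMode: Always
  natOutgoing: true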