Add Kube-OVN CNI to kubespray (#5020)

Authored by Oilbeater on 2019-07-31 11:10:20 +08:00, committed by Kubernetes Prow Robot
parent 8afbf339f7
commit 1be788f785
17 changed files with 716 additions and 4 deletions


@@ -86,6 +86,11 @@ packet_centos7-calico-ha:
<<: *packet
when: manual
packet_centos7-kube-ovn:
stage: deploy-part2
<<: *packet
when: on_success
packet_centos7-kube-router:
stage: deploy-part2
<<: *packet


@@ -158,7 +158,7 @@ These limits are safe guarded by Kubespray. Actual requirements for your workloa
Network Plugins
---------------
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
You can choose between 10 network plugins. (default: `calico`, except Vagrant uses `flannel`)
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
@@ -174,6 +174,8 @@ You can choose between 6 network plugins. (default: `calico`, except Vagrant use
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
(Please refer to `weave` [troubleshooting documentation](https://www.weave.works/docs/net/latest/troubleshooting/)).
- [kube-ovn](docs/kube-ovn.md): Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
- [kube-router](docs/kube-router.md): Kube-router is a L3 CNI for Kubernetes networking aiming to provide operational
simplicity and high performance: it uses IPVS to provide Kube Services Proxy (if set up to replace kube-proxy),
iptables for network policies, and BGP for pods L3 networking (optionally with BGP peering with out-of-cluster BGP peers).

docs/kube-ovn.md (new file)

@@ -0,0 +1,48 @@
Kube-OVN
===========
Kube-OVN integrates the OVN-based Network Virtualization with Kubernetes. It offers an advanced Container Network Fabric for Enterprises.
For more information, please check the [Kube-OVN documentation](https://github.com/alauda/kube-ovn).
## How to use it
Enable kube-ovn in `group_vars/k8s-cluster/k8s-cluster.yml`
```
...
kube_network_plugin: kube-ovn
...
```
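Then run (or re-run) the Kubespray playbook so the new network plugin gets deployed; a minimal example, assuming your inventory lives at `inventory/mycluster/hosts.ini`:
```
ansible-playbook -i inventory/mycluster/hosts.ini -b cluster.yml
```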
## Verifying kube-ovn install
Kube-OVN runs OVN and its controller components in the `kube-ovn` namespace.
* Check the status of kube-ovn pods
```
# From the CLI
kubectl get pod -n kube-ovn
# Output
NAME READY STATUS RESTARTS AGE
kube-ovn-cni-49lsm 1/1 Running 0 2d20h
kube-ovn-cni-9db8f 1/1 Running 0 2d20h
kube-ovn-cni-wftdk 1/1 Running 0 2d20h
kube-ovn-controller-68d7bb48bd-7tnvg 1/1 Running 0 2d21h
ovn-central-6675dbb7d9-d7z8m 1/1 Running 0 4d16h
ovs-ovn-hqn8p 1/1 Running 0 4d16h
ovs-ovn-hvpl8 1/1 Running 0 4d16h
ovs-ovn-r5frh 1/1 Running 0 4d16h
```
* Check the default and node (join) subnets
```
# From the CLI
kubectl get subnet
# Output
NAME PROTOCOL CIDR PRIVATE NAT
join IPv4 100.64.0.0/16 false false
ovn-default IPv4 10.16.0.0/16 false true
```
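* Check that pods are getting addresses from the default subnet (the namespace below is only an example; any pod on the cluster network works)
```
# From the CLI
kubectl get pod -n kube-system -o wide
# Pod IPs should fall inside the ovn-default CIDR reported by `kubectl get subnet`
```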


@@ -74,6 +74,7 @@ weave_version: 2.5.2
pod_infra_version: 3.1
contiv_version: 1.2.1
cilium_version: "v1.3.0"
kube_ovn_version: "v0.6.0"
kube_router_version: "v0.2.5"
multus_version: "v3.1.autoconf"
@@ -238,6 +239,14 @@ cilium_image_repo: "docker.io/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
cilium_init_image_repo: "docker.io/library/busybox"
cilium_init_image_tag: "1.28.4"
kube_ovn_db_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-db"
kube_ovn_node_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-node"
kube_ovn_cni_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-cni"
kube_ovn_controller_image_repo: "index.alauda.cn/alaudak8s/kube-ovn-controller"
kube_ovn_db_image_tag: "{{ kube_ovn_version }}"
kube_ovn_node_image_tag: "{{ kube_ovn_version }}"
kube_ovn_controller_image_tag: "{{ kube_ovn_version }}"
kube_ovn_cni_image_tag: "{{ kube_ovn_version }}"
kube_router_image_repo: "docker.io/cloudnativelabs/kube-router"
kube_router_image_tag: "{{ kube_router_version }}"
multus_image_repo: "docker.io/nfvpe/multus"
@@ -536,6 +545,42 @@ downloads:
groups:
- k8s-cluster
kube_ovn_db:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_db_image_repo }}"
tag: "{{ kube_ovn_db_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_node:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_node_image_repo }}"
tag: "{{ kube_ovn_node_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_controller:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_controller_image_repo }}"
tag: "{{ kube_ovn_controller_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn_cni:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true
repo: "{{ kube_ovn_cni_image_repo }}"
tag: "{{ kube_ovn_cni_image_tag }}"
sha256: "{{ kube_ovn_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_router:
enabled: "{{ kube_network_plugin == 'kube-router' }}"
container: true


@@ -0,0 +1,9 @@
---
- name: Kube-OVN | Start Resources
kube:
name: "{{ item.item.name }}"
kubectl: "{{ bin_dir }}/kubectl"
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
state: "latest"
with_items: "{{ kube_ovn_node_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0] and not item is skipped


@@ -30,6 +30,11 @@ dependencies:
tags:
- cni
- role: kubernetes-apps/network_plugin/kube-ovn
when: kube_network_plugin == 'kube-ovn'
tags:
- kube-ovn
- role: kubernetes-apps/network_plugin/weave
when: kube_network_plugin == 'weave'
tags:


@@ -50,7 +50,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
{% endif %}
KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register-with-taints={{ node_taints | join(',') }} {% endif %}--node-labels={{ all_node_labels | join(',') }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}{% if inventory_hostname in groups['kube-node'] %}{% if kubelet_node_custom_flags is string %} {{kubelet_node_custom_flags}} {% else %}{% for flag in kubelet_node_custom_flags %} {{flag}} {% endfor %}{% endif %}{% endif %}"
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "cni", "flannel", "weave", "contiv", "cilium", "kube-router", "macvlan"] %}
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "cni", "flannel", "weave", "contiv", "cilium", "kube-ovn", "kube-router", "macvlan"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"


@@ -21,7 +21,7 @@
- name: Stop if unknown network plugin
assert:
that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'contiv', 'kube-router', 'macvlan']
that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'contiv', 'kube-ovn', 'kube-router', 'macvlan']
when: kube_network_plugin is defined
ignore_errors: "{{ ignore_assert_errors }}"


@@ -52,7 +52,7 @@
- "/opt/cni/bin"
- "/var/lib/calico"
when:
- kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-router", "macvlan"]
- kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-ovn", "kube-router", "macvlan"]
- inventory_hostname in groups['k8s-cluster']
tags:
- network
@@ -61,6 +61,7 @@
- weave
- canal
- contiv
- kube-ovn
- kube-router
- bootstrap-os


@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- oilbeater
reviewers:
- oilbeater


@@ -0,0 +1,11 @@
---
kube_ovn_db_cpu_request: 200m
kube_ovn_db_memory_request: 300Mi
kube_ovn_db_cpu_limit: 400m
kube_ovn_db_memory_limit: 500Mi
kube_ovn_node_cpu_request: 100m
kube_ovn_node_memory_request: 300Mi
kube_ovn_node_cpu_limit: 200m
kube_ovn_node_memory_limit: 500Mi
traffic_mirror: true
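These are ordinary role defaults, so they can be overridden from the inventory like any other Kubespray variable; an illustrative override (the file path follows the standard `group_vars` layout, the values are only examples):
```
# inventory/mycluster/group_vars/k8s-cluster/k8s-cluster.yml
traffic_mirror: false             # turn off traffic mirroring on the CNI server
kube_ovn_db_memory_limit: 1000Mi  # give ovn-central more headroom
```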


@@ -0,0 +1,16 @@
---
- name: Kube-OVN | Label ovn-db node
shell: >-
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube-master'] | first }} kube-ovn/role=master
when:
- inventory_hostname == groups['kube-master'][0]
- name: Kube-OVN | Create Kube-OVN manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
with_items:
- {name: kube-ovn-crd, file: cni-kube-ovn-crd.yml}
- {name: ovn, file: cni-ovn.yml}
- {name: kube-ovn, file: cni-kube-ovn.yml}
register: kube_ovn_node_manifests
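The first task pins the OVN database to the first master through the `kube-ovn/role=master` label, which the `ovn-central` nodeSelector below depends on; after the play has run, the label can be spot-checked with:
```
kubectl get node -l kube-ovn/role=master
# Expect the first kube-master host to be listed
```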


@@ -0,0 +1,65 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ips.kubeovn.io
spec:
group: kubeovn.io
version: v1
scope: Cluster
names:
plural: ips
singular: ip
kind: IP
shortNames:
- ip
additionalPrinterColumns:
- name: IP
type: string
JSONPath: .spec.ipAddress
- name: Mac
type: string
JSONPath: .spec.macAddress
- name: Node
type: string
JSONPath: .spec.nodeName
- name: Subnet
type: string
JSONPath: .spec.subnet
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: subnets.kubeovn.io
spec:
group: kubeovn.io
version: v1
scope: Cluster
names:
plural: subnets
singular: subnet
kind: Subnet
shortNames:
- subnet
additionalPrinterColumns:
- name: Protocol
type: string
JSONPath: .spec.protocol
- name: CIDR
type: string
JSONPath: .spec.cidrBlock
- name: Private
type: boolean
JSONPath: .spec.private
- name: NAT
type: boolean
JSONPath: .spec.natOutgoing
validation:
openAPIV3Schema:
properties:
spec:
required: ["cidrBlock","gateway"]
properties:
cidrBlock:
type: "string"
gateway:
type: "string"


@@ -0,0 +1,176 @@
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: kube-ovn-controller
namespace: kube-ovn
annotations:
kubernetes.io/description: |
kube-ovn controller
spec:
replicas: 2
selector:
matchLabels:
app: kube-ovn-controller
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-controller
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-ovn-controller
topologyKey: kubernetes.io/hostname
serviceAccountName: ovn
hostNetwork: true
containers:
- name: kube-ovn-controller
image: {{ kube_ovn_controller_image_repo }}:{{ kube_ovn_controller_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- /kube-ovn/start-controller.sh
args:
- --default-cidr={{ kube_pods_subnet }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: KUBE_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
readinessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10660"
periodSeconds: 3
livenessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10660"
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
nodeSelector:
beta.kubernetes.io/os: "linux"
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: kube-ovn-cni
namespace: kube-ovn
annotations:
kubernetes.io/description: |
This daemon set launches the kube-ovn cni daemon.
spec:
selector:
matchLabels:
app: kube-ovn-cni
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: kube-ovn-cni
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: ovn
hostNetwork: true
hostPID: true
initContainers:
- name: install-cni
image: {{ kube_ovn_cni_image_repo }}:{{ kube_ovn_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command: ["/kube-ovn/install-cni.sh"]
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni-conf
- mountPath: /opt/cni/bin
name: cni-bin
containers:
- name: cni-server
image: {{ kube_ovn_cni_image_repo }}:{{ kube_ovn_cni_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
command:
- sh
- /kube-ovn/start-cniserver.sh
args:
- --enable-mirror={{ traffic_mirror }}
securityContext:
runAsUser: 0
privileged: true
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /run/openvswitch
name: host-run-ovs
readinessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10665"
periodSeconds: 3
livenessProbe:
exec:
command:
- nc
- -z
- -w3
- 127.0.0.1
- "10665"
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
nodeSelector:
beta.kubernetes.io/os: "linux"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: cni-conf
hostPath:
path: /etc/cni/net.d
- name: cni-bin
hostPath:
path: /opt/cni/bin
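Both probes above only test local TCP ports, so the same check can be repeated by hand on a node when a pod stays unready (the ports come straight from the manifests: 10660 for kube-ovn-controller, 10665 for the CNI server):
```
# Run on a node hosting the affected pod (both containers use host networking)
nc -z -w3 127.0.0.1 10665 && echo "cni-server port open"
nc -z -w3 127.0.0.1 10660 && echo "controller port open"
```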


@@ -0,0 +1,309 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-ovn
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ovn-config
namespace: kube-ovn
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ovn
namespace: kube-ovn
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.k8s.io/system-only: "true"
name: system:ovn
rules:
- apiGroups:
- "kubeovn.io"
resources:
- subnets
- ips
verbs:
- "*"
- apiGroups:
- ""
resources:
- pods
- namespaces
- nodes
- configmaps
verbs:
- create
- get
- list
- watch
- patch
- update
- apiGroups:
- ""
- networking.k8s.io
- apps
resources:
- networkpolicies
- services
- endpoints
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ovn
roleRef:
name: system:ovn
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: ovn
namespace: kube-ovn
---
kind: Service
apiVersion: v1
metadata:
name: ovn-nb
namespace: kube-ovn
spec:
ports:
- name: ovn-nb
protocol: TCP
port: 6641
targetPort: 6641
type: ClusterIP
selector:
app: ovn-central
sessionAffinity: None
---
kind: Service
apiVersion: v1
metadata:
name: ovn-sb
namespace: kube-ovn
spec:
ports:
- name: ovn-sb
protocol: TCP
port: 6642
targetPort: 6642
type: ClusterIP
selector:
app: ovn-central
sessionAffinity: None
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovn-central
namespace: kube-ovn
annotations:
kubernetes.io/description: |
OVN components: northd, nb and sb.
spec:
replicas: 1
strategy:
rollingUpdate:
maxSurge: 0%
maxUnavailable: 100%
type: RollingUpdate
selector:
matchLabels:
app: ovn-central
template:
metadata:
labels:
app: ovn-central
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: ovn-central
topologyKey: kubernetes.io/hostname
serviceAccountName: ovn
hostNetwork: true
containers:
- name: ovn-central
image: {{ kube_ovn_db_image_repo }}:{{ kube_ovn_db_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
resources:
requests:
cpu: {{ kube_ovn_db_cpu_request }}
memory: {{ kube_ovn_db_memory_request }}
limits:
cpu: {{ kube_ovn_db_cpu_limit }}
memory: {{ kube_ovn_db_memory_limit }}
volumeMounts:
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log
readinessProbe:
exec:
command:
- sh
- /root/ovn-is-leader.sh
periodSeconds: 3
livenessProbe:
exec:
command:
- sh
- /root/ovn-healthcheck.sh
initialDelaySeconds: 30
periodSeconds: 7
failureThreshold: 5
nodeSelector:
beta.kubernetes.io/os: "linux"
kube-ovn/role: "master"
volumes:
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log
hostPath:
path: /var/log/openvswitch
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: ovs-ovn
namespace: kube-ovn
annotations:
kubernetes.io/description: |
This daemon set launches the openvswitch daemon.
spec:
selector:
matchLabels:
app: ovs
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: ovs
component: network
type: infra
spec:
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: ovn
hostNetwork: true
hostPID: true
containers:
- name: openvswitch
image: {{ kube_ovn_node_image_repo }}:{{ kube_ovn_node_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
securityContext:
runAsUser: 0
privileged: true
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- mountPath: /lib/modules
name: host-modules
readOnly: true
- mountPath: /run/openvswitch
name: host-run-ovs
- mountPath: /var/run/openvswitch
name: host-run-ovs
- mountPath: /sys
name: host-sys
readOnly: true
- mountPath: /etc/openvswitch
name: host-config-openvswitch
- mountPath: /var/log/openvswitch
name: host-log
readinessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
periodSeconds: 5
livenessProbe:
exec:
command:
- sh
- /root/ovs-healthcheck.sh
initialDelaySeconds: 10
periodSeconds: 5
failureThreshold: 5
resources:
requests:
cpu: {{ kube_ovn_node_cpu_request }}
memory: {{ kube_ovn_node_memory_request }}
limits:
cpu: {{ kube_ovn_node_cpu_limit }}
memory: {{ kube_ovn_node_memory_limit }}
nodeSelector:
beta.kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
path: /lib/modules
- name: host-run-ovs
hostPath:
path: /run/openvswitch
- name: host-sys
hostPath:
path: /sys
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
- name: host-log
hostPath:
path: /var/log/openvswitch
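The `ovn-nb` and `ovn-sb` ClusterIP Services above front the OVN northbound (6641) and southbound (6642) databases; a quick way to confirm they picked up the `ovn-central` pod after deployment:
```
kubectl -n kube-ovn get svc ovn-nb ovn-sb
kubectl -n kube-ovn get endpoints ovn-nb ovn-sb
```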


@@ -40,6 +40,11 @@ dependencies:
tags:
- contiv
- role: network_plugin/kube-ovn
when: kube_network_plugin == 'kube-ovn'
tags:
- kube-ovn
- role: network_plugin/kube-router
when: kube_network_plugin == 'kube-router'
tags:


@@ -0,0 +1,9 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
kube_network_plugin: kube-ovn
deploy_netchecker: true
dns_min_replicas: 1