Initial version of Flannel using CNI (#1486)

* Updates Controller Manager/Kubelet with Flannel's required configuration for CNI
* Removes old Flannel installation
* Installs CNI-enabled Flannel DaemonSet/ConfigMap/CNI binaries and config (with portmap plugin) on hosts
* Uses RBAC if enabled
* Fixes an issue that could occur if br_netfilter is not a module and the net.bridge.bridge-nf-call-iptables sysctl is not set (see the sketch below the commit metadata)
Chad Swenson 2017-08-25 02:07:50 -05:00 committed by Matthew Mosesohn
parent 4550dccb84
commit a39e78d42d
16 changed files with 279 additions and 142 deletions
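
Before the file diffs, a rough shell equivalent of the br_netfilter handling called out in the last bullet above. This is only an illustration of the logic the flannel role now encodes in Ansible (see its tasks/main.yml diff below); the command names are standard, but running them by hand is not part of the playbook.

```
# If br_netfilter exists as a kernel module, load it; otherwise fall back to
# setting the bridge-nf-call sysctls directly so kube-proxy's iptables rules
# apply to bridged pod traffic.
if modinfo br_netfilter >/dev/null 2>&1; then
  modprobe br_netfilter
else
  sysctl -w net.bridge.bridge-nf-call-iptables=1
  sysctl -w net.bridge.bridge-nf-call-arptables=1
  sysctl -w net.bridge.bridge-nf-call-ip6tables=1
fi
```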


@@ -23,13 +23,6 @@ ip a show dev flannel.1
        valid_lft forever preferred_lft forever
 ```
-* Docker must be configured with a bridge ip in the flannel subnet.
-```
-ps aux | grep docker
-root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
-```
 * Try to run a container and check its ip address
 ```


@@ -1,3 +1,3 @@
 [Service]
 Environment="DOCKER_OPTS={{ docker_options | default('') }} \
---iptables={% if kube_network_plugin == 'flannel' %}true{% else %}false{% endif %}"
+--iptables=false"


@@ -27,7 +27,8 @@ calico_ctl_version: "v1.4.0"
 calico_cni_version: "v1.10.0"
 calico_policy_version: "v0.7.0"
 weave_version: 2.0.1
-flannel_version: v0.8.0
+flannel_version: "v0.8.0"
+flannel_cni_version: "v0.2.0"
 pod_infra_version: 3.0
 
 # Download URL's
@@ -43,6 +44,8 @@ etcd_image_repo: "quay.io/coreos/etcd"
 etcd_image_tag: "{{ etcd_version }}"
 flannel_image_repo: "quay.io/coreos/flannel"
 flannel_image_tag: "{{ flannel_version }}"
+flannel_cni_image_repo: "quay.io/coreos/flannel-cni"
+flannel_cni_image_tag: "{{ flannel_cni_version }}"
 calicoctl_image_repo: "quay.io/calico/ctl"
 calicoctl_image_tag: "{{ calico_ctl_version }}"
 calico_node_image_repo: "quay.io/calico/node"
@@ -138,6 +141,12 @@ downloads:
     tag: "{{ flannel_image_tag }}"
     sha256: "{{ flannel_digest_checksum|default(None) }}"
     enabled: "{{ kube_network_plugin == 'flannel' or kube_network_plugin == 'canal' }}"
+  flannel_cni:
+    container: true
+    repo: "{{ flannel_cni_image_repo }}"
+    tag: "{{ flannel_cni_image_tag }}"
+    sha256: "{{ flannel_cni_digest_checksum|default(None) }}"
+    enabled: "{{ kube_network_plugin == 'flannel' }}"
   calicoctl:
     container: true
     repo: "{{ calicoctl_image_repo }}"


@@ -0,0 +1,22 @@
---
- name: "Flannel | Create ServiceAccount ClusterRole and ClusterRoleBinding"
  command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/cni-flannel-rbac.yml"
  run_once: true
  when: rbac_enabled and flannel_rbac_manifest.changed

- name: Flannel | Start Resources
  kube:
    name: "kube-flannel"
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "{{ kube_config_dir }}/cni-flannel.yml"
    resource: "ds"
    namespace: "{{system_namespace}}"
    state: "{{ item | ternary('latest','present') }}"
  with_items: "{{ flannel_manifest.changed }}"
  when: inventory_hostname == groups['kube-master'][0]

- name: Flannel | Wait for flannel subnet.env file presence
  wait_for:
    path: /run/flannel/subnet.env
    delay: 5
    timeout: 600
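
A hedged way to verify what the tasks above deploy and wait for, assuming {{ system_namespace }} renders to kube-system and kubectl is available on the first master; the names come from the manifests added later in this commit.

```
# The DaemonSet and its pods (labelled k8s-app: flannel in cni-flannel.yml.j2)
kubectl --namespace=kube-system get daemonset kube-flannel
kubectl --namespace=kube-system get pods -l k8s-app=flannel -o wide
# flanneld writes this file on each node once its pod is running; the wait_for
# task above blocks until it appears
cat /run/flannel/subnet.env
```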


@@ -3,6 +3,9 @@ dependencies:
   - role: kubernetes-apps/network_plugin/canal
     when: kube_network_plugin == 'canal'
     tags: canal
+  - role: kubernetes-apps/network_plugin/flannel
+    when: kube_network_plugin == 'flannel'
+    tags: flannel
   - role: kubernetes-apps/network_plugin/weave
     when: kube_network_plugin == 'weave'
     tags: weave


@@ -45,9 +45,12 @@ spec:
     - --cloud-provider={{cloud_provider}}
 {% endif %}
 {% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
-    - --allocate-node-cidrs=true
     - --configure-cloud-routes=true
+{% endif %}
+{% if kube_network_plugin is defined and kube_network_plugin in ["cloud", "flannel"] %}
+    - --allocate-node-cidrs=true
     - --cluster-cidr={{ kube_pods_subnet }}
+    - --service-cluster-ip-range={{ kube_service_addresses }}
 {% endif %}
 {% if kube_feature_gates %}
     - --feature-gates={{ kube_feature_gates|join(',') }}
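
The new template block above turns on node CIDR allocation for flannel as well as cloud: with --allocate-node-cidrs and --cluster-cidr set, the controller manager assigns each node a podCIDR, which flanneld consumes in --kube-subnet-mgr mode instead of reading subnets from etcd. A quick, non-authoritative check that allocation happened:

```
# Print each node and its assigned podCIDR (empty output means allocation
# did not happen and flannel's kube subnet manager has nothing to work with).
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'
```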


@@ -56,7 +56,7 @@ KUBELET_HOSTNAME="--hostname-override={{ kube_override_hostname }}"
 {% endif %}
 KUBELET_ARGS="{{ kubelet_args_base }} {{ kubelet_args_dns }} {{ kubelet_args_kubeconfig }} {{ node_labels }} {% if kube_feature_gates %} --feature-gates={{ kube_feature_gates|join(',') }} {% endif %} {% if kubelet_custom_flags is string %} {{kubelet_custom_flags}} {% else %}{% for flag in kubelet_custom_flags %} {{flag}} {% endfor %}{% endif %}"
-{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %}
+{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "flannel", "weave"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
 DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"


@@ -1,7 +1,8 @@
 ---
 # Flannel public IP
 # The address that flannel should advertise as how to access the system
-flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}"
+# Disabled until https://github.com/coreos/flannel/issues/712 is fixed
+# flannel_public_ip: "{{ access_ip|default(ip|default(ansible_default_ipv4.address)) }}"
 
 ## interface that should be used for flannel operations
 ## This is actually an inventory node-level item
@@ -17,5 +18,5 @@ flannel_cpu_limit: 300m
 flannel_memory_requests: 64M
 flannel_cpu_requests: 150m
 
-flannel_cert_dir: /etc/flannel/certs
-etcd_cert_dir: /etc/ssl/etcd/ssl
+# Legacy directory, will be removed if found.
+flannel_cert_dir: /etc/flannel/certs


@@ -4,6 +4,10 @@
   failed_when: false
   notify: Flannel | restart docker
 
+- name: Flannel | delete flannel interface
+  command: ip link delete flannel.1
+  failed_when: false
+
 # special cases for atomic because it defaults to live-restore: true
 # So we disable live-restore to pickup the new flannel IP. After
 # we enable it, we have to restart docker again to pickup the new


@@ -3,3 +3,6 @@ dependencies:
   - role: download
     file: "{{ downloads.flannel }}"
     tags: download
+  - role: download
+    file: "{{ downloads.flannel_cni }}"
+    tags: download


@@ -1,83 +1,47 @@
 ---
-- name: Flannel | Set Flannel etcd configuration
-  command: |-
-    {{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \
-    set /{{ cluster_name }}/network/config \
-    '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
-  delegate_to: "{{groups['etcd'][0]}}"
-  run_once: true
-
-- name: Flannel | Create flannel certs directory
-  file:
-    dest: "{{ flannel_cert_dir }}"
-    state: directory
-    mode: 0750
-    owner: root
-    group: root
-
-- name: Flannel | Link etcd certificates for flanneld
-  file:
-    src: "{{ etcd_cert_dir }}/{{ item.s }}"
-    dest: "{{ flannel_cert_dir }}/{{ item.d }}"
-    state: hard
-    force: yes
-  with_items:
-    - {s: "ca.pem", d: "ca_cert.crt"}
-    - {s: "node-{{ inventory_hostname }}.pem", d: "cert.crt"}
-    - {s: "node-{{ inventory_hostname }}-key.pem", d: "key.pem"}
-
-- name: Flannel | Create flannel pod manifest
-  template:
-    src: flannel-pod.yml
-    dest: "{{kube_manifest_dir}}/flannel-pod.manifest"
-  notify: Flannel | delete default docker bridge
-
-- name: Flannel | Wait for flannel subnet.env file presence
-  wait_for:
-    path: /run/flannel/subnet.env
-    delay: 5
-    timeout: 600
-
-- name: Flannel | Get flannel_subnet from subnet.env
-  shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_SUBNET" {print $2}'
-  register: flannel_subnet_output
-  changed_when: false
-  check_mode: no
-
-- set_fact:
-    flannel_subnet: "{{ flannel_subnet_output.stdout }}"
-
-- name: Flannel | Get flannel_mtu from subnet.env
-  shell: cat /run/flannel/subnet.env | awk -F'=' '$1 == "FLANNEL_MTU" {print $2}'
-  register: flannel_mtu_output
-  changed_when: false
-  check_mode: no
-
-- set_fact:
-    flannel_mtu: "{{ flannel_mtu_output.stdout }}"
-
-- set_fact:
-    docker_options_file: >-
-      {%- if ansible_os_family == "Debian" -%}/etc/default/docker{%- elif ansible_os_family == "RedHat" -%}/etc/sysconfig/docker{%- endif -%}
-  tags: facts
-
-- set_fact:
-    docker_options_name: >-
-      {%- if ansible_os_family == "Debian" -%}DOCKER_OPTS{%- elif ansible_os_family == "RedHat" -%}other_args{%- endif -%}
-  tags: facts
-
-- set_fact:
-    docker_network_options: '"--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"'
-
-- name: Flannel | Ensure path for docker network systemd drop-in
-  file:
-    path: "/etc/systemd/system/docker.service.d"
-    state: directory
-    owner: root
-
-- name: Flannel | Create docker network systemd drop-in
-  template:
-    src: flannel-options.conf.j2
-    dest: "/etc/systemd/system/docker.service.d/flannel-options.conf"
-  notify:
-    - Flannel | restart docker
+- include: pre-upgrade.yml
+
+- name: Flannel | Verify if br_netfilter module exists
+  shell: "modinfo br_netfilter"
+  register: modinfo_br_netfilter
+  failed_when: modinfo_br_netfilter.rc not in [0, 1]
+  changed_when: false
+
+- name: Flannel | Enable br_netfilter module
+  modprobe:
+    name: br_netfilter
+    state: present
+  when: modinfo_br_netfilter.rc == 0
+
+# kube-proxy needs net.bridge.bridge-nf-call-iptables enabled when found if br_netfilter is not a module
+- name: Flannel | Check if bridge-nf-call-iptables key exists
+  command: "sysctl net.bridge.bridge-nf-call-iptables"
+  failed_when: false
+  changed_when: false
+  register: sysctl_bridge_nf_call_iptables
+
+- name: Flannel | Enable bridge-nf-call tables
+  sysctl:
+    name: "{{ item }}"
+    state: present
+    value: 1
+    reload: yes
+  when: modinfo_br_netfilter.rc == 1 and sysctl_bridge_nf_call_iptables.rc == 0
+  with_items:
+    - net.bridge.bridge-nf-call-iptables
+    - net.bridge.bridge-nf-call-arptables
+    - net.bridge.bridge-nf-call-ip6tables
+
+- name: Flannel | Create cni-flannel-rbac manifest
+  template:
+    src: cni-flannel-rbac.yml.j2
+    dest: "{{ kube_config_dir }}/cni-flannel-rbac.yml"
+  register: flannel_rbac_manifest
+  when: inventory_hostname == groups['kube-master'][0] and rbac_enabled
+
+- name: Flannel | Create cni-flannel manifest
+  template:
+    src: cni-flannel.yml.j2
+    dest: "{{ kube_config_dir }}/cni-flannel.yml"
+  register: flannel_manifest
+  when: inventory_hostname == groups['kube-master'][0]
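
A small, hedged spot-check to run on a node after this role: either the module branch or the sysctl branch above should have taken effect.

```
# Module branch: br_netfilter loaded
lsmod | grep br_netfilter
# Sysctl branch (or the module's defaults): the keys should typically report 1
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-arptables net.bridge.bridge-nf-call-ip6tables
```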


@@ -0,0 +1,19 @@
---
- name: Flannel pre-upgrade | Purge legacy flannel systemd unit file
  file:
    path: "/etc/systemd/system/docker.service.d/flannel-options.conf"
    state: absent
  notify:
    - Flannel | delete default docker bridge

- name: Flannel pre-upgrade | Purge legacy Flannel static pod manifest
  file:
    path: "{{ kube_manifest_dir }}/flannel-pod.manifest"
    state: absent
  notify:
    - Flannel | delete flannel interface

- name: Flannel pre-upgrade | Remove Flannel's certificate directory not required by CNI
  file:
    dest: "{{ flannel_cert_dir }}"
    state: absent


@@ -0,0 +1,44 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: "{{system_namespace}}"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: "{{system_namespace}}"
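
The ClusterRole above mirrors what flanneld needs in --kube-subnet-mgr mode: read pods, list/watch nodes, and patch node status. A hedged spot-check via impersonation, assuming RBAC is enabled and {{ system_namespace }} renders to kube-system:

```
kubectl auth can-i list nodes --as=system:serviceaccount:kube-system:flannel
kubectl auth can-i patch nodes --subresource=status --as=system:serviceaccount:kube-system:flannel
```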


@@ -0,0 +1,125 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: "{{system_namespace}}"
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name":"cbr0",
      "cniVersion":"0.3.1",
      "plugins":[
        {
          "type":"flannel",
          "delegate":{
            "forceAddress":true,
            "isDefaultGateway":true
          }
        },
        {
          "type":"portmap",
          "capabilities":{
            "portMappings":true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "{{ kube_pods_subnet }}",
      "Backend": {
        "Type": "{{ flannel_backend_type }}"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel
  namespace: "{{system_namespace}}"
  labels:
    tier: node
    k8s-app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        k8s-app: flannel
    spec:
{% if rbac_enabled %}
      serviceAccountName: flannel
{% endif %}
      containers:
      - name: kube-flannel
        image: {{ flannel_image_repo }}:{{ flannel_image_tag }}
        imagePullPolicy: {{ k8s_image_pull_policy }}
        resources:
          limits:
            cpu: {{ flannel_cpu_limit }}
            memory: {{ flannel_memory_limit }}
          requests:
            cpu: {{ flannel_cpu_requests }}
            memory: {{ flannel_memory_requests }}
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"{% if flannel_interface is defined %}, "--iface={{ flannel_interface }}"{% endif %} ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: {{ flannel_cni_image_repo }}:{{ flannel_cni_image_tag }}
        command: ["/install-cni.sh"]
        env:
        # The CNI network config to install on each node.
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: kube-flannel-cfg
              key: cni-conf.json
        - name: CNI_CONF_NAME
          value: "10-flannel.conflist"
        volumeMounts:
        - name: cni
          mountPath: /host/etc/cni/net.d
        - name: host-cni-bin
          mountPath: /host/opt/cni/bin/
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
        - name: host-cni-bin
          hostPath:
            path: /opt/cni/bin
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
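
Once the DaemonSet above is running, the install-cni container should have dropped the rendered CNI config and plugin binaries onto each host via the hostPath mounts. A rough check on any node (the binary names are assumptions based on the plugin types in cni-conf.json; the config filename comes from CNI_CONF_NAME):

```
cat /etc/cni/net.d/10-flannel.conflist
ls -l /opt/cni/bin/ | grep -E 'flannel|portmap'
```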


@@ -1,6 +0,0 @@
[Service]
{% if ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] %}
Environment="DOCKER_OPT_BIP=--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% else %}
Environment="DOCKER_NETWORK_OPTIONS=--bip={{ flannel_subnet }} --mtu={{ flannel_mtu }}"
{% endif %}


@@ -1,47 +0,0 @@
---
kind: "Pod"
apiVersion: "v1"
metadata:
  name: "flannel"
  namespace: "{{system_namespace}}"
  labels:
    app: "flannel"
    version: "v0.1"
spec:
  tolerations:
  - effect: NoSchedule
    operator: Exists
  volumes:
    - name: "subnetenv"
      hostPath:
        path: "/run/flannel"
    - name: "etcd-certs"
      hostPath:
        path: "{{ flannel_cert_dir }}"
  containers:
    - name: "flannel-container"
      image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}"
      imagePullPolicy: {{ k8s_image_pull_policy }}
      resources:
        limits:
          cpu: {{ flannel_cpu_limit }}
          memory: {{ flannel_memory_limit }}
        requests:
          cpu: {{ flannel_cpu_requests }}
          memory: {{ flannel_memory_requests }}
      command:
        - "/bin/sh"
        - "-c"
        - "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ flannel_cert_dir }}/ca_cert.crt -etcd-certfile {{ flannel_cert_dir }}/cert.crt -etcd-keyfile {{ flannel_cert_dir }}/key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}"
      ports:
        - hostPort: 10253
          containerPort: 10253
      volumeMounts:
        - name: "subnetenv"
          mountPath: "/run/flannel"
        - name: "etcd-certs"
          mountPath: "{{ flannel_cert_dir }}"
          readOnly: true
      securityContext:
        privileged: true
  hostNetwork: true