From bcaf2f9ea3a000ad6f0acda218f6346f0660e5af Mon Sep 17 00:00:00 2001
From: Erwan Miran
Date: Mon, 17 Sep 2018 16:45:05 +0200
Subject: [PATCH] contiv 1.2.1

---
 docs/contiv.md                                 |  4 +-
 inventory/sample/group_vars/all/all.yml        |  7 ++
 roles/download/defaults/main.yml               |  6 +-
 .../network_plugin/contiv/tasks/configure.yml  | 40 +++++++++
 .../network_plugin/contiv/tasks/main.yml       |  1 -
 roles/kubespray-defaults/defaults/main.yaml    |  5 ++
 roles/network_plugin/contiv/defaults/main.yml  | 14 +++
 roles/network_plugin/contiv/tasks/main.yml     | 51 ++++++++---
 roles/network_plugin/contiv/tasks/reset.yml    |  9 ++
 .../contiv/templates/contiv-api-proxy.yml.j2   |  5 +-
 .../contiv/templates/contiv-config.yml.j2      | 18 ++--
 .../contiv/templates/contiv-etcd-proxy.yml.j2  |  2 +
 .../contiv-netmaster-clusterrole.yml.j2        |  1 +
 .../contiv/templates/contiv-netmaster.yml.j2   | 27 ++++--
 .../contiv/templates/contiv-netplugin.yml.j2   | 85 ++++++++++++-------
 roles/reset/tasks/main.yml                     |  7 +-
 16 files changed, 220 insertions(+), 62 deletions(-)
 create mode 100644 roles/network_plugin/contiv/tasks/reset.yml

diff --git a/docs/contiv.md b/docs/contiv.md
index 1366a2dfd..29a8ebbc7 100644
--- a/docs/contiv.md
+++ b/docs/contiv.md
@@ -54,16 +54,18 @@ The default configuration uses VXLAN to create an overlay. Two networks are crea
 
 You can change the default network configuration by overriding the `contiv_networks` variable.
 
-The default forward mode is set to routing:
+The default forward mode is set to routing and the default network mode is vxlan:
 
 ```yaml
 contiv_fwd_mode: routing
+contiv_net_mode: vxlan
 ```
 
 The following is an example of how you can use VLAN instead of VXLAN:
 
 ```yaml
 contiv_fwd_mode: bridge
+contiv_net_mode: vlan
 contiv_vlan_interface: eth0
 contiv_networks:
   - name: default-net
diff --git a/inventory/sample/group_vars/all/all.yml b/inventory/sample/group_vars/all/all.yml
index faf65eb1a..eff115f9f 100644
--- a/inventory/sample/group_vars/all/all.yml
+++ b/inventory/sample/group_vars/all/all.yml
@@ -43,6 +43,13 @@ bin_dir: /usr/local/bin
 ## The subnets of each nodes will be distributed by the datacenter router
 #peer_with_router: false
 
+## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
+## In this case, you may need to peer with an uplink
+## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as) and "neighbor"
+#contiv_peer_with_uplink_leaf: false
+#contiv_global_as: "65002"
+#contiv_global_neighbor_as: "500"
+
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 #  - 8.8.8.8
diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml
index 896fe8ba3..4000cffd1 100644
--- a/roles/download/defaults/main.yml
+++ b/roles/download/defaults/main.yml
@@ -48,7 +48,7 @@ flannel_cni_version: "v0.3.0"
 vault_version: 0.10.1
 weave_version: "2.4.1"
 pod_infra_version: 3.1
-contiv_version: 1.1.7
+contiv_version: 1.2.1
 cilium_version: "v1.2.0"
 
 # Download URLs
@@ -104,10 +104,14 @@ weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
 weave_npc_image_tag: "{{ weave_version }}"
 contiv_image_repo: "contiv/netplugin"
 contiv_image_tag: "{{ contiv_version }}"
+contiv_init_image_repo: "contiv/netplugin-init"
+contiv_init_image_tag: "latest"
 contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 contiv_etcd_init_image_repo: "ferest/etcd-initer"
 contiv_etcd_init_image_tag: latest
+contiv_ovs_image_repo: "contiv/ovs"
+contiv_ovs_image_tag: "latest"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
 nginx_image_repo: nginx
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
index 35eeeacfc..a080aa4f0 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/configure.yml
@@ -33,6 +33,46 @@
   when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
   run_once: true
 
+- name: Contiv | Set peer hostname
+  set_fact:
+    contiv_peer_hostname: >-
+      {%- if override_system_hostname|default(true) -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
+      {%- else -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
+      {%- endif -%}
+  with_items: "{{ groups['k8s-cluster'] }}"
+  run_once: true
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Get BGP configuration
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp ls --json
+  register: bgp_config
+  run_once: true
+  changed_when: false
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Configure peering with router(s)
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp create {{ item.value }} \
+      --router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
+      --as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
+      --neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
+      --neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
+  run_once: true
+  with_dict: "{{ contiv_peer_hostname }}"
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
+
 - name: Contiv | Get existing networks
   command: |
     {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
diff --git a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
index 5289296dc..1bca92329 100644
--- a/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
+++ b/roles/kubernetes-apps/network_plugin/contiv/tasks/main.yml
@@ -9,7 +9,6 @@
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ contiv_manifests_results.results }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
 - import_tasks: configure.yml
diff --git a/roles/kubespray-defaults/defaults/main.yaml b/roles/kubespray-defaults/defaults/main.yaml
index e405c7a3f..d3e563935 100644
--- a/roles/kubespray-defaults/defaults/main.yaml
+++ b/roles/kubespray-defaults/defaults/main.yaml
@@ -303,6 +303,11 @@ weave_mode_seed: false
 weave_seed: uninitialized
 weave_peers: uninitialized
 
+# Contiv L3 BGP Mode
+contiv_peer_with_uplink_leaf: false
+contiv_global_as: "65002"
+contiv_global_neighbor_as: "500"
+
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
   {%- if http_proxy is defined or https_proxy is defined %}
diff --git a/roles/network_plugin/contiv/defaults/main.yml b/roles/network_plugin/contiv/defaults/main.yml
index b6e237df5..622d0fd8d 100644
--- a/roles/network_plugin/contiv/defaults/main.yml
+++ b/roles/network_plugin/contiv/defaults/main.yml
@@ -6,8 +6,10 @@ contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
 contiv_netmaster_port: 9999
 contiv_cni_version: 0.1.0
 
+# No need to download it by default, but must be defined
 contiv_etcd_image_repo: "{{ etcd_image_repo }}"
 contiv_etcd_image_tag: "{{ etcd_image_tag }}"
+
 contiv_etcd_listen_port: 6666
 contiv_etcd_peer_port: 6667
 contiv_etcd_endpoints: |-
@@ -26,9 +28,21 @@ contiv_fwd_mode: routing
 # Fabric mode: aci, aci-opflex or default
 contiv_fabric_mode: default
 
+# Default netmode: vxlan or vlan
+contiv_net_mode: vxlan
+
 # Dataplane interface
 contiv_vlan_interface: ""
 
+# Default loglevels are INFO
+contiv_netmaster_loglevel: "WARN"
+contiv_netplugin_loglevel: "WARN"
+contiv_ovsdb_server_loglevel: "warn"
+contiv_ovs_vswitchd_loglevel: "warn"
+
+# VxLAN port
+contiv_vxlan_port: 4789
+
 # Default network configuration
 contiv_networks:
   - name: contivh1
diff --git a/roles/network_plugin/contiv/tasks/main.yml b/roles/network_plugin/contiv/tasks/main.yml
index bc9dcd3c0..9f8258785 100644
--- a/roles/network_plugin/contiv/tasks/main.yml
+++ b/roles/network_plugin/contiv/tasks/main.yml
@@ -16,8 +16,25 @@
   with_items:
     - "{{ contiv_etcd_conf_dir }}"
    - "{{ contiv_etcd_data_dir }}"
+  when: inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
+  set_fact:
+    kube_apiserver_endpoint_for_contiv: |-
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+      {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
+      https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
+      {%- if loadbalancer_apiserver.port|string != "443" -%}
+      :{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      {%- endif -%}
+      {%- else -%}
+      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- endif %}
+  when: inventory_hostname in groups['kube-master']
+
+- name: Contiv | Set necessary facts
+  set_fact:
     contiv_config_dir: "{{ contiv_config_dir }}"
     contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
     contiv_fabric_mode: "{{ contiv_fabric_mode }}"
@@ -26,22 +43,26 @@
     contiv_networks: "{{ contiv_networks }}"
     contiv_manifests:
       - {name: contiv-config, file: contiv-config.yml, type: configmap}
+      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
+      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
+      - {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
       - {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
+      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
       - {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
-      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
-      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
-      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
+  when: inventory_hostname in groups['kube-master']
 
 - set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
-  when: contiv_enable_api_proxy
+  when:
+    - contiv_enable_api_proxy
+    - inventory_hostname in groups['kube-master']
 
 - name: Contiv | Create /var/contiv
   file:
@@ -55,21 +76,23 @@
     mode: 0755
     owner: root
     group: root
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Install all Kubernetes resources
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ contiv_config_dir }}/{{ item.file }}"
   with_items: "{{ contiv_manifests }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  run_once: true
   register: contiv_manifests_results
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Generate contiv-api-proxy certificates
   script: generate-certificate.sh
   args:
     creates: /var/contiv/auth_proxy_key.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
@@ -81,7 +104,9 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
@@ -92,9 +117,11 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "inventory_hostname != groups['kube-master'][0]
-         and inventory_hostname in groups['kube-master']
-         and contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - inventory_hostname != groups['kube-master'][0]
+    - inventory_hostname in groups['kube-master']
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
 
 - name: Contiv | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"
diff --git a/roles/network_plugin/contiv/tasks/reset.yml b/roles/network_plugin/contiv/tasks/reset.yml
new file mode 100644
index 000000000..3cf293610
--- /dev/null
+++ b/roles/network_plugin/contiv/tasks/reset.yml
@@ -0,0 +1,9 @@
+---
+- name: reset | check contiv vxlan_sys network device
+  stat:
+    path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  register: contiv_vxlan_sys
+
+- name: reset | remove the vxlan_sys network device created by contiv
+  command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  when: contiv_vxlan_sys.stat.exists
diff --git a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
index cea0efe51..16b8a9713 100644
--- a/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-api-proxy.yml.j2
@@ -35,16 +35,19 @@ spec:
             - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
             - --tls-key-file=/var/contiv/auth_proxy_key.pem
            - --tls-certificate=/var/contiv/auth_proxy_cert.pem
+            - --data-store-driver=$(STORE_DRIVER)
             - --data-store-address=$(CONTIV_ETCD)
             - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
           env:
             - name: NO_NETMASTER_STARTUP_CHECK
               value: "0"
+            - name: STORE_DRIVER
+              value: etcd
             - name: CONTIV_ETCD
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
+                  key: contiv_etcd
           securityContext:
             privileged: false
           volumeMounts:
diff --git a/roles/network_plugin/contiv/templates/contiv-config.yml.j2 b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
index 249d9d88e..18b7748eb 100644
--- a/roles/network_plugin/contiv/templates/contiv-config.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-config.yml.j2
@@ -7,20 +7,22 @@
   name: contiv-config
   namespace: kube-system
 data:
-  # The location of your cluster store. This is set to the
-  # avdertise-client value below from the contiv-etcd service.
-  # Change it to an external etcd/consul instance if required.
-  cluster_store: "etcd://127.0.0.1:{{ contiv_etcd_listen_port }}"
-  # The CNI network configuration to install on each node.
-  cni_config: |-
+  contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
+  contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
+  contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
+  contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
+  contiv_fwdmode: {{ contiv_fwd_mode }}
+  contiv_netmode: {{ contiv_net_mode }}
+  contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
+  contiv_cni_config: |-
     {
       "cniVersion": "{{ contiv_cni_version }}",
       "name": "contiv-net",
       "type": "contivk8s"
     }
-  config: |-
+  contiv_k8s_config: |-
     {
-      "K8S_API_SERVER": "{{ kube_apiserver_endpoint }}",
+      "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
       "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
       "K8S_KEY": "",
       "K8S_CERT": "",
diff --git a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2 b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
index 75946d821..a4adedd46 100644
--- a/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-etcd-proxy.yml.j2
@@ -19,6 +19,8 @@ spec:
     spec:
       hostNetwork: true
       hostPID: true
+      nodeSelector:
+        node-role.kubernetes.io/node: "true"
       containers:
         - name: contiv-etcd-proxy
           image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
index 92b4f588d..4c179e6c4 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster-clusterrole.yml.j2
@@ -13,6 +13,7 @@ rules:
       - namespaces
      - networkpolicies
     verbs:
+      - get
       - watch
       - list
       - update
diff --git a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
index 787fe5c27..be0f23360 100644
--- a/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netmaster.yml.j2
@@ -1,4 +1,4 @@
-# This manifest deploys the Contiv API Server on Kubernetes.
+---
 kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:
@@ -31,20 +31,31 @@ spec:
       containers:
         - name: contiv-netmaster
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -m
-            - -pkubernetes
           env:
-            - name: CONTIV_ETCD
+            - name: CONTIV_ROLE
+              value: netmaster
+            - name: CONTIV_NETMASTER_MODE
+              value: kubernetes
+            - name: CONTIV_NETMASTER_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
-            - name: CONTIV_CONFIG
+                  key: contiv_etcd
+            - name: CONTIV_NETMASTER_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_fwdmode
+            - name: CONTIV_NETMASTER_NET_MODE
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmode
+            - name: CONTIV_NETMASTER_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmaster_loglevel
           securityContext:
             privileged: true
           volumeMounts:
diff --git a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2 b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
index b7927f51c..755e9b204 100644
--- a/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
+++ b/roles/network_plugin/contiv/templates/contiv-netplugin.yml.j2
@@ -1,3 +1,4 @@
+---
 # This manifest installs contiv-netplugin container, as well
 # as the Contiv CNI plugins and network config on
 # each master and worker node in a Kubernetes cluster.
@@ -27,73 +28,99 @@ spec:
         - key: node-role.kubernetes.io/master
           effect: NoSchedule
       serviceAccountName: contiv-netplugin
+      initContainers:
+        - name: contiv-netplugin-init
+          image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
+          env:
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_MODE
+              value: kubernetes
+            - name: CONTIV_K8S_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_k8s_config
+            - name: CONTIV_CNI_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_cni_config
+          volumeMounts:
+            - mountPath: /var/contiv
+              name: var-contiv
+              readOnly: false
+            - mountPath: /etc/cni/net.d/
+              name: etc-cni-dir
+              readOnly: false
+        - name: contiv-cni
+          image: {{ contiv_image_repo }}:{{ contiv_version }}
+          command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+              readOnly: false
       containers:
         # Runs netplugin container on each Kubernetes node. This
         # container programs network policy and routes on each
         # host.
         - name: contiv-netplugin
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -pkubernetes
-            - -x
           env:
             - name: VLAN_IF
               value: {{ contiv_vlan_interface }}
-            - name: VTEP_IP
+            - name: CONTIV_NETPLUGIN_VLAN_UPLINKS
+              value: {{ contiv_vlan_interface }}
+            - name: CONTIV_NETPLUGIN_VXLAN_PORT
+              value: "{{ contiv_vxlan_port }}"
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_NETPLUGIN_MODE
+              value: kubernetes
+            - name: CONTIV_NETPLUGIN_VTEP_IP
               valueFrom:
                 fieldRef:
                   fieldPath: status.podIP
-            - name: CONTIV_ETCD
+            - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
-            - name: CONTIV_CNI_CONFIG
+                  key: contiv_etcd
+            - name: CONTIV_NETPLUGIN_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cni_config
-            - name: CONTIV_CONFIG
+                  key: contiv_fwdmode
+            - name: CONTIV_NETPLUGIN_NET_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_netmode
+            - name: CONTIV_NETPLUGIN_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netplugin_loglevel
+          resources:
+            requests:
+              cpu: 250m
           securityContext:
             privileged: true
           volumeMounts:
-            - mountPath: /etc/openvswitch
-              name: etc-openvswitch
-              readOnly: false
-            - mountPath: /lib/modules
-              name: lib-modules
-              readOnly: false
             - mountPath: /var/run
               name: var-run
               readOnly: false
             - mountPath: /var/contiv
               name: var-contiv
               readOnly: false
-            - mountPath: /opt/cni/bin
-              name: cni-bin-dir
-              readOnly: false
-            - mountPath: /etc/cni/net.d/
-              name: etc-cni-dir
-              readOnly: false
       volumes:
         # Used by contiv-netplugin
-        - name: etc-openvswitch
-          hostPath:
-            path: /etc/openvswitch
-        - name: lib-modules
-          hostPath:
-            path: /lib/modules
         - name: var-run
           hostPath:
             path: /var/run
         - name: var-contiv
           hostPath:
            path: /var/contiv
-        # Used to install CNI.
         - name: cni-bin-dir
           hostPath:
             path: /opt/cni/bin
diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml
index 88dec8d7a..0fe73408b 100644
--- a/roles/reset/tasks/main.yml
+++ b/roles/reset/tasks/main.yml
@@ -150,6 +150,11 @@
     - "{{ bin_dir }}/weave"
     - /var/lib/rkt
     - /etc/vault
+    - /etc/contiv
+    - /var/contiv
+    - /run/contiv
+    - /etc/openvswitch
+    - /run/openvswitch
   ignore_errors: yes
   tags:
     - files
@@ -181,7 +186,7 @@
 
 - name: reset | include file with reset tasks specific to the network_plugin if exists
   include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
   when:
-    - kube_network_plugin in ['flannel', 'cilium']
+    - kube_network_plugin in ['flannel', 'cilium', 'contiv']
   tags:
     - network
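
For reviewers, a minimal sketch of the per-node hostvars that the new "Contiv | Configure peering with router(s)" task expects when `contiv_fwd_mode: routing` and `contiv_peer_with_uplink_leaf: true` are set. The file name, IP addresses and AS numbers below are illustrative placeholders; only the key names (`router_ip`, `as`, `neighbor_as`, `neighbor`) come from this patch:

```yaml
# inventory/sample/host_vars/node1.yml (hypothetical example, not part of this patch)
contiv:
  router_ip: 10.0.0.2        # address announced for this node over BGP
  as: "65002"                # optional, falls back to contiv_global_as
  neighbor_as: "500"         # optional, falls back to contiv_global_neighbor_as
  neighbor: 10.0.0.1         # IP of the uplink leaf/router to peer with
```

`netctl bgp create` is then run once per node with these values, and the group-level `contiv_global_as` / `contiv_global_neighbor_as` defaults fill in any AS numbers omitted from the hostvars.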