Remove contiv related files (#6964)

Florian Ruynat 2020-11-30 15:48:50 +01:00 committed by GitHub
parent 4a8a52bad9
commit f6eed8091e
42 changed files with 48 additions and 1304 deletions

View file

@@ -128,12 +128,6 @@ packet_ubuntu18-ovn4nfv:
extends: .packet_periodic
when: on_success
# Contiv does not work in k8s v1.16
# packet_ubuntu16-contiv-sep:
# stage: deploy-part2
# extends: .packet_pr
# when: on_success
# ### MANUAL JOBS
packet_ubuntu16-weave-sep:

View file

@@ -126,7 +126,6 @@ Note: Upstart/SysV init based OS types are not supported.
- [calico](https://github.com/projectcalico/calico) v3.16.5
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
- [cilium](https://github.com/cilium/cilium) v1.8.5
- [contiv](https://github.com/contiv/install) v1.2.1
- [flanneld](https://github.com/coreos/flannel) v0.13.0
- [kube-ovn](https://github.com/alauda/kube-ovn) v1.5.2
- [kube-router](https://github.com/cloudnativelabs/kube-router) v1.1.0
@@ -179,9 +178,6 @@ You can choose between 10 network plugins. (default: `calico`, except Vagrant us
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin can
apply firewall policies, segregate containers into multiple networks, and bridge pods onto physical networks.
- [ovn4nfv](docs/ovn4nfv.md): [ovn4nfv-k8s-plugins](https://github.com/opnfv/ovn4nfv-k8s-plugin) is the network controller, OVS agent and CNI server to offer basic SFC and OVN overlay networking.
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.

View file

@@ -12,7 +12,6 @@
* [Air-Gap Installation](docs/offline-environment.md)
* CNI
* [Calico](docs/calico.md)
* [Contiv](docs/contiv.md)
* [Flannel](docs/flannel.md)
* [Kube Router](docs/kube-router.md)
* [Weave](docs/weave.md)

View file

@@ -4,51 +4,51 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
## docker
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
opensuse | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :white_check_mark: | :x: | :x: | :white_check_mark: |
ubuntu18 | :white_check_mark: | :x: | :white_check_mark: | :white_check_mark: | :x: | :x: | :x: | :white_check_mark: | :white_check_mark: |
ubuntu20 | :white_check_mark: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
## crio
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
## containerd
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | flannel | kube-ovn | kube-router | macvlan | ovn4nfv | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora31 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora32 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
opensuse | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
oracle7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu16 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
ubuntu18 | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: | :x: |
ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View file

@@ -1,72 +0,0 @@
# Contiv
Here is the [Contiv documentation](https://contiv.github.io/documents/).
## Administrate Contiv
There are two ways to manage Contiv:
* a web UI served by the API proxy service
* a CLI named `netctl`
### Interfaces
#### The Web Interface
This UI is hosted on all Kubernetes master nodes. The service is available at `https://<one of your master nodes>:10000`.
You can configure the API proxy by overriding the following variables:
```yaml
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true
```
The default credentials to log in are: admin/admin.
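If you only want to verify that the proxy answers, you can request a token from the login endpoint. The endpoint path below is an assumption based on the Contiv auth_proxy documentation, so check it against your version:
```bash
# Hypothetical smoke test of the API proxy; -k skips verification of the self-signed certificate.
curl -k -X POST \
  -d '{"username": "admin", "password": "admin"}' \
  "https://<one of your master nodes>:10000/api/v1/auth_proxy/login/"
```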
#### The Command Line Interface
The second way to modify the Contiv configuration is the CLI. To use it, connect to one of the master nodes and export an environment variable that tells netctl how to reach the netmaster:
```bash
export NETMASTER=http://127.0.0.1:9999
```
The port can be changed by overriding the following variable:
```yaml
contiv_netmaster_port: 9999
```
The CLI doesn't use the authentication process needed by the web interface.
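For example, once `NETMASTER` is exported (and with `netctl` on your PATH, as installed by the role), you can run the same read-only commands the playbooks below use:
```bash
# Show the global configuration (forwarding mode, fabric mode, ...)
netctl global info --json --all
# List the networks known to the netmaster
netctl net ls
```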
### Network configuration
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
* `contivh1`: an infrastructure network. It lets nodes reach pod IPs and is mandatory in a Kubernetes environment that uses VXLAN.
* `default-net`: the default network that hosts pods.
You can change the default network configuration by overriding the `contiv_networks` variable.
The default forward mode is set to routing and the default network mode is vxlan:
```yaml
contiv_fwd_mode: routing
contiv_net_mode: vxlan
```
The following is an example of how you can use VLAN instead of VXLAN:
```yaml
contiv_fwd_mode: bridge
contiv_net_mode: vlan
contiv_vlan_interface: eth0
contiv_networks:
- name: default-net
subnet: "{{ kube_pods_subnet }}"
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
encap: vlan
pkt_tag: 10
```
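In the VLAN example above, `pkt_tag` is presumably the 802.1Q VLAN ID used on `contiv_vlan_interface`, so it must match a VLAN that is actually trunked to that interface.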

View file

@@ -62,7 +62,7 @@ credentials_dir: "{{ inventory_dir }}/credentials"
# kube_webhook_authorization_url: https://...
# kube_webhook_authorization_url_skip_tls_verify: false
# Choose network plugin (cilium, calico, contiv, weave or flannel. Use cni for generic cni plugin)
# Choose network plugin (cilium, calico, weave or flannel. Use cni for generic cni plugin)
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
kube_network_plugin: calico

View file

@@ -1,20 +0,0 @@
# see roles/network_plugin/contiv/defaults/main.yml
# Forwarding mode: bridge or routing
# contiv_fwd_mode: routing
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
## In this case, you may need to peer with an uplink
## NB: The hostvars must contain a key "contiv" whose value is a dict containing "router_ip", "as" (defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), and "neighbor"; see the commented example below
# contiv_peer_with_uplink_leaf: false
# contiv_global_as: "65002"
# contiv_global_neighbor_as: "500"
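## Example hostvars entry for one node (illustrative values only):
## contiv:
##   router_ip: 10.99.0.2
##   as: "65002"
##   neighbor_as: "500"
##   neighbor: 10.99.0.1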
# Fabric mode: aci, aci-opflex or default
# contiv_fabric_mode: default
# Default netmode: vxlan or vlan
# contiv_net_mode: vxlan
# Dataplane interface
# contiv_vlan_interface: ""

View file

@@ -78,7 +78,6 @@ cni_version: "v0.8.7"
weave_version: 2.7.0
pod_infra_version: "3.3"
contiv_version: 1.2.1
cilium_version: "v1.8.5"
kube_ovn_version: "v1.5.2"
kube_router_version: "v1.1.0"
@@ -444,16 +443,6 @@ weave_kube_image_repo: "{{ docker_image_repo }}/weaveworks/weave-kube"
weave_kube_image_tag: "{{ weave_version }}"
weave_npc_image_repo: "{{ docker_image_repo }}/weaveworks/weave-npc"
weave_npc_image_tag: "{{ weave_version }}"
contiv_image_repo: "{{ docker_image_repo }}/contiv/netplugin"
contiv_image_tag: "{{ contiv_version }}"
contiv_init_image_repo: "{{ docker_image_repo }}/contiv/netplugin-init"
contiv_init_image_tag: "{{ contiv_version }}"
contiv_auth_proxy_image_repo: "{{ docker_image_repo }}/contiv/auth_proxy"
contiv_auth_proxy_image_tag: "{{ contiv_version }}"
contiv_etcd_init_image_repo: "{{ docker_image_repo }}/ferest/etcd-initer"
contiv_etcd_init_image_tag: latest
contiv_ovs_image_repo: "{{ docker_image_repo }}/contiv/ovs"
contiv_ovs_image_tag: "latest"
cilium_image_repo: "{{ quay_image_repo }}/cilium/cilium"
cilium_image_tag: "{{ cilium_version }}"
cilium_init_image_repo: "{{ quay_image_repo }}/cilium/cilium-init"
@@ -808,33 +797,6 @@ downloads:
groups:
- k8s-cluster
contiv:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_image_repo }}"
tag: "{{ contiv_image_tag }}"
sha256: "{{ contiv_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_auth_proxy:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_auth_proxy_image_repo }}"
tag: "{{ contiv_auth_proxy_image_tag }}"
sha256: "{{ contiv_auth_proxy_digest_checksum|default(None) }}"
groups:
- k8s-cluster
contiv_etcd_init:
enabled: "{{ kube_network_plugin == 'contiv' }}"
container: true
repo: "{{ contiv_etcd_init_image_repo }}"
tag: "{{ contiv_etcd_init_image_tag }}"
sha256: "{{ contiv_etcd_init_digest_checksum|default(None) }}"
groups:
- k8s-cluster
kube_ovn:
enabled: "{{ kube_network_plugin == 'kube-ovn' }}"
container: true

View file

@@ -1,113 +0,0 @@
---
- name: Contiv | Wait for netmaster
uri:
url: "http://127.0.0.1:{{ contiv_netmaster_port }}/info"
register: result
until: result.status is defined and result.status == 200
retries: 10
delay: 5
- name: Contiv | Get global configuration
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
global info --json --all
register: global_config
run_once: true
changed_when: false
- name: Contiv | Set contiv_global_config
set_fact:
contiv_global_config: "{{ (global_config.stdout|from_json)[0] }}"
- name: Contiv | Set global forwarding mode
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
global set --fwd-mode={{ contiv_fwd_mode }}
when: "contiv_global_config.get('fwdMode', '') != contiv_fwd_mode"
run_once: true
- name: Contiv | Set global fabric mode
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
global set --fabric-mode={{ contiv_fabric_mode }}
when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
run_once: true
- name: Contiv | Set peer hostname
set_fact:
contiv_peer_hostname: >-
{%- if override_system_hostname|default(true) -%}
{{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
{%- else -%}
{{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
{%- endif -%}
with_items: "{{ groups['k8s-cluster'] }}"
run_once: true
when:
- contiv_fwd_mode == 'routing'
- contiv_peer_with_uplink_leaf
- name: Contiv | Get BGP configuration
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
bgp ls --json
register: bgp_config
run_once: true
changed_when: false
when:
- contiv_fwd_mode == 'routing'
- contiv_peer_with_uplink_leaf
- name: Contiv | Configure peering with router(s)
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
bgp create {{ item.value }} \
--router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
--as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
--neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
--neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
run_once: true
with_dict: "{{ contiv_peer_hostname }}"
when:
- contiv_fwd_mode == 'routing'
- contiv_peer_with_uplink_leaf
- bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
- name: Contiv | Get existing networks
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
net ls -q
register: net_result
run_once: true
changed_when: false
- name: Contiv | Create networks
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
net create \
--encap={{ item.encap|default("vxlan") }} \
--gateway={{ item.gateway }} \
--nw-type={{ item.nw_type|default("data") }} \
--pkt-tag={{ item.pkt_tag|default("0") }} \
--subnet={{ item.subnet }} \
--tenant={{ item.tenant|default("default") }} \
"{{ item.name }}"
with_items: "{{ contiv_networks }}"
when: item['name'] not in net_result.stdout_lines
run_once: true
- name: Contiv | Check if default group exists
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
group ls -q
register: group_result
run_once: true
changed_when: false
- name: Contiv | Create default group
command: |
{{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
group create default-net default
when: "'default' not in group_result.stdout_lines"
run_once: true

View file

@@ -1,14 +0,0 @@
---
- name: Contiv | Create Kubernetes resources
kube:
name: "{{ item.item.name }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}"
filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
state: "{{ item.changed | ternary('latest','present') }}"
with_items: "{{ contiv_manifests_results.results }}"
run_once: true
- import_tasks: configure.yml

View file

@@ -20,11 +20,6 @@ dependencies:
tags:
- flannel
- role: kubernetes-apps/network_plugin/contiv
when: kube_network_plugin == 'contiv'
tags:
- contiv
- role: kubernetes-apps/network_plugin/kube-ovn
when: kube_network_plugin == 'kube-ovn'
tags:

View file

@@ -38,7 +38,7 @@ KUBELET_ARGS="{{ kubelet_args_base }} {% if node_taints|default([]) %}--register
{% if kubelet_flexvolumes_plugins_dir is defined %}
KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "cni", "flannel", "weave", "contiv", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"] %}
{% if kube_network_plugin is defined and kube_network_plugin in ["calico", "canal", "cni", "flannel", "weave", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"] %}
KUBELET_NETWORK_PLUGIN="--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
{% elif kube_network_plugin is defined and kube_network_plugin == "cloud" %}
KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"

View file

@@ -30,7 +30,7 @@
- name: Stop if unknown network plugin
assert:
that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'contiv', 'ovn4nfv','kube-ovn', 'kube-router', 'macvlan']
that: kube_network_plugin in ['calico', 'canal', 'flannel', 'weave', 'cloud', 'cilium', 'cni', 'ovn4nfv','kube-ovn', 'kube-router', 'macvlan']
msg: "{{ kube_network_plugin }} is not supported"
when:
- kube_network_plugin is defined

View file

@@ -71,7 +71,7 @@
- "/opt/cni/bin"
- "/var/lib/calico"
when:
- kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"]
- kube_network_plugin in ["calico", "weave", "canal", "flannel", "cilium", "kube-ovn", "ovn4nfv", "kube-router", "macvlan"]
- inventory_hostname in groups['k8s-cluster']
tags:
- network
@@ -79,7 +79,6 @@
- calico
- weave
- canal
- contiv
- ovn4nfv
- kube-ovn
- kube-router

View file

@@ -421,11 +421,6 @@ local_volume_provisioner_storage_classes: |
# you can use --extra-vars to pass the password in command line
weave_password: EnterPasswordHere
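# e.g.: ansible-playbook -i <your inventory> cluster.yml --extra-vars "weave_password=MySecretPassword" (hypothetical value)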
# Contiv L3 BGP Mode
contiv_peer_with_uplink_leaf: false
contiv_global_as: "65002"
contiv_global_neighbor_as: "500"
ssl_ca_dirs: |-
[
{% if ansible_os_family in ['Flatcar Container Linux by Kinvolk'] -%}

View file

@@ -1,55 +0,0 @@
---
contiv_config_dir: "{{ kube_config_dir }}/contiv"
contiv_etcd_conf_dir: "/etc/contiv/etcd"
contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
contiv_netmaster_port: 9999
contiv_cni_version: 0.3.1
# No need to download it by default, but must be defined
contiv_etcd_image_repo: "{{ etcd_image_repo }}"
contiv_etcd_image_tag: "{{ etcd_image_tag }}"
contiv_etcd_listen_port: 6666
contiv_etcd_peer_port: 6667
contiv_etcd_endpoints: |-
{% for host in groups['kube-master'] -%}
contiv_etcd{{ loop.index }}=http://{{ hostvars[host]['ip'] | default(fallback_ips[host]) }}:{{ contiv_etcd_peer_port }}{% if not loop.last %},{% endif %}
{%- endfor %}
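# Renders to a comma-separated list such as (illustrative IPs): contiv_etcd1=http://10.0.0.10:6667,contiv_etcd2=http://10.0.0.11:6667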
# Parameters for Contiv api-proxy
contiv_enable_api_proxy: true
contiv_api_proxy_port: 10000
contiv_generate_certificate: true
# Forwarding mode: bridge or routing
contiv_fwd_mode: routing
# Fabric mode: aci, aci-opflex or default
contiv_fabric_mode: default
# Default netmode: vxlan or vlan
contiv_net_mode: vxlan
# Dataplane interface
contiv_vlan_interface: ""
# Default loglevels are INFO
contiv_netmaster_loglevel: "WARN"
contiv_netplugin_loglevel: "WARN"
contiv_ovsdb_server_loglevel: "warn"
contiv_ovs_vswitchd_loglevel: "warn"
# VxLAN port
contiv_vxlan_port: 4789
# Default network configuration
contiv_networks:
- name: contivh1
subnet: "10.233.128.0/18"
gateway: "10.233.128.1"
nw_type: infra
- name: default-net
subnet: "{{ kube_pods_subnet }}"
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
pkt_tag: 10

View file

@@ -1,10 +0,0 @@
#!/bin/bash
set -e
echo "Starting cleanup"
# Delete every OVS bridge created by Contiv
ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
# Remove the leftover veth ports that Contiv attached to the bridges
for p in $(ifconfig | grep vport | awk '{print $1}');
do
ip link delete $p type veth
done
# Signal the DaemonSet readiness probe that cleanup finished, then linger
touch /tmp/cleanup.done
sleep 60

View file

@@ -1,6 +0,0 @@
---
- name: Contiv | Reload kernel modules
service:
name: systemd-modules-load
state: restarted
enabled: yes

View file

@@ -1,3 +0,0 @@
---
dependencies:
- role: network_plugin/cni

View file

@@ -1,156 +0,0 @@
---
- name: Contiv | Load openvswitch kernel module
copy:
dest: /etc/modules-load.d/openvswitch.conf
content: "openvswitch"
notify:
- Contiv | Reload kernel modules
- name: Contiv | Create contiv etcd directories
file:
dest: "{{ item }}"
state: directory
mode: 0750
owner: root
group: root
with_items:
- "{{ contiv_etcd_conf_dir }}"
- "{{ contiv_etcd_data_dir }}"
when: inventory_hostname in groups['kube-master']
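# Workaround for contiv/netplugin#1152: compute an apiserver URL each node can reach. Prefer the localhost apiserver load-balancer, then an external load-balancer domain (appending its port unless it is 443), then the first master directly.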
- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
set_fact:
kube_apiserver_endpoint_for_contiv: |-
{% if not is_kube_master and loadbalancer_apiserver_localhost -%}
https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
{%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
{%- if loadbalancer_apiserver.port|string != "443" -%}
:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
{%- endif -%}
{%- else -%}
https://{{ first_kube_master }}:{{ kube_apiserver_port }}
{%- endif %}
when: inventory_hostname in groups['kube-master']
- name: Contiv | Set necessary facts
set_fact:
contiv_config_dir: "{{ contiv_config_dir }}"
contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
contiv_fabric_mode: "{{ contiv_fabric_mode }}"
contiv_fwd_mode: "{{ contiv_fwd_mode }}"
contiv_netmaster_port: "{{ contiv_netmaster_port }}"
contiv_networks: "{{ contiv_networks }}"
contiv_manifests:
- {name: contiv-config, file: contiv-config.yml, type: configmap}
- {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
- {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
- {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
- {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
- {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
- {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
- {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
- {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
- {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
- {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
- {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
when: inventory_hostname in groups['kube-master']
- name: Contiv | Add another manifest if contiv_enable_api_proxy is true
set_fact:
contiv_manifests: |-
{% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
{{ contiv_manifests }}
when:
- contiv_enable_api_proxy
- inventory_hostname in groups['kube-master']
- name: Contiv | Create /var/contiv
file:
path: /var/contiv
state: directory
- name: Contiv | Create contiv config directory
file:
dest: "{{ contiv_config_dir }}"
state: directory
mode: 0755
owner: root
group: root
when: inventory_hostname in groups['kube-master']
- name: Contiv | Install all Kubernetes resources
template:
src: "{{ item.file }}.j2"
dest: "{{ contiv_config_dir }}/{{ item.file }}"
with_items: "{{ contiv_manifests }}"
register: contiv_manifests_results
when: inventory_hostname in groups['kube-master']
- name: Contiv | Copy certs generation script
template:
src: "generate-certificate.sh.j2"
dest: "/var/contiv/generate-certificate.sh"
mode: 0700
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Check for cert key existence
stat:
path: /var/contiv/auth_proxy_key.pem
register: contiv_certificate_key_state
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Generate contiv-api-proxy certificates
command: /var/contiv/generate-certificate.sh
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
- (not contiv_certificate_key_state.stat.exists)
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Fetch the generated certificate
fetch:
src: "/var/contiv/{{ item }}"
dest: "/tmp/kubespray-contiv-{{ item }}"
flat: yes
with_items:
- auth_proxy_key.pem
- auth_proxy_cert.pem
when:
- contiv_enable_api_proxy
- contiv_generate_certificate
delegate_to: "{{ groups['kube-master'][0] }}"
run_once: true
- name: Contiv | Copy the generated certificate on nodes
copy:
src: "/tmp/kubespray-contiv-{{ item }}"
dest: "/var/contiv/{{ item }}"
with_items:
- auth_proxy_key.pem
- auth_proxy_cert.pem
when:
- inventory_hostname != groups['kube-master'][0]
- inventory_hostname in groups['kube-master']
- contiv_enable_api_proxy
- contiv_generate_certificate
- name: Contiv | Copy netctl binary from docker container
command: sh -c "{{ docker_bin_dir }}/docker rm -f netctl-binarycopy;
{{ docker_bin_dir }}/docker create --name netctl-binarycopy {{ contiv_image_repo }}:{{ contiv_image_tag }} &&
{{ docker_bin_dir }}/docker cp netctl-binarycopy:/contiv/bin/netctl {{ bin_dir }}/netctl &&
{{ docker_bin_dir }}/docker rm -f netctl-binarycopy"
register: contiv_task_result
until: contiv_task_result.rc == 0
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
changed_when: false

View file

@@ -1,66 +0,0 @@
---
- name: reset | Check that kubectl is still here
stat:
path: "{{ bin_dir }}/kubectl"
register: contiv_kubectl
- name: reset | Delete contiv netplugin and netmaster daemonsets
kube:
name: "{{ item }}"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "ds"
state: absent
with_items:
- contiv-netplugin
- contiv-netmaster
register: contiv_cleanup_deletion
tags:
- network
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
- name: reset | Copy contiv temporary cleanup script
copy:
src: ../files/contiv-cleanup.sh # noqa 404 Not in role_path so we must trick...
dest: /opt/cni/bin/cleanup
owner: root
group: root
mode: 0750
when:
- contiv_kubectl.stat.exists
- name: reset | Lay down contiv cleanup template
template:
src: ../templates/contiv-cleanup.yml.j2 # noqa 404 Not in role_path so we must trick...
dest: "{{ kube_config_dir }}/contiv-cleanup.yml" # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
register: contiv_cleanup_manifest
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
- name: reset | Start contiv cleanup resources
kube:
name: "contiv-cleanup"
namespace: "kube-system"
kubectl: "{{ bin_dir }}/kubectl"
resource: "ds"
state: latest
filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]
ignore_errors: true
- name: reset | Wait until contiv cleanup is done
command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
register: cleanup_done_all_nodes
until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
retries: 5
delay: 5
ignore_errors: true
changed_when: false
when:
- contiv_kubectl.stat.exists
- inventory_hostname == groups['kube-master'][0]

View file

@@ -1,9 +0,0 @@
---
- name: reset | check contiv vxlan_sys network device
stat:
path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
register: contiv_vxlan_sys
- name: reset | remove the vxlan_sys network device created by contiv
command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
when: contiv_vxlan_sys.stat.exists

View file

@@ -1,62 +0,0 @@
# This manifest deploys the Contiv API Proxy Server on Kubernetes.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: contiv-api-proxy
namespace: kube-system
labels:
k8s-app: contiv-api-proxy
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: contiv-api-proxy
template:
metadata:
name: contiv-api-proxy
namespace: kube-system
labels:
k8s-app: contiv-api-proxy
spec:
priorityClassName: system-node-critical
# The API proxy must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
serviceAccountName: contiv-netmaster
containers:
- name: contiv-api-proxy
image: {{ contiv_auth_proxy_image_repo }}:{{ contiv_auth_proxy_image_tag }}
args:
- --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
- --tls-key-file=/var/contiv/auth_proxy_key.pem
- --tls-certificate=/var/contiv/auth_proxy_cert.pem
- --data-store-driver=$(STORE_DRIVER)
- --data-store-address=$(CONTIV_ETCD)
- --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
env:
- name: NO_NETMASTER_STARTUP_CHECK
value: "0"
- name: STORE_DRIVER
value: etcd
- name: CONTIV_ETCD
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
securityContext:
privileged: false
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
- name: var-contiv
hostPath:
path: /var/contiv

View file

@@ -1,58 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-cleanup
namespace: kube-system
labels:
k8s-app: contiv-cleanup
spec:
selector:
matchLabels:
k8s-app: contiv-cleanup
template:
metadata:
labels:
k8s-app: contiv-cleanup
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
serviceAccountName: contiv-netplugin
containers:
- name: contiv-ovs-cleanup
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/opt/cni/bin/cleanup"]
securityContext:
privileged: true
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /var/run
name: var-run
readOnly: false
- mountPath: /opt/cni/bin
name: cni-bin-dir
readOnly: false
readinessProbe:
exec:
command:
- cat
- /tmp/cleanup.done
initialDelaySeconds: 3
periodSeconds: 3
successThreshold: 1
volumes:
- name: etc-openvswitch
hostPath:
path: /etc/openvswitch
- name: var-run
hostPath:
path: /var/run
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin

View file

@@ -1,31 +0,0 @@
# This ConfigMap is used to configure a self-hosted Contiv installation.
# It can be used with an external cluster store (etcd or consul) or used
# with the etcd instance being installed as contiv-etcd
kind: ConfigMap
apiVersion: v1
metadata:
name: contiv-config
namespace: kube-system
data:
contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
contiv_fwdmode: {{ contiv_fwd_mode }}
contiv_netmode: {{ contiv_net_mode }}
contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
contiv_cni_config: |-
{
"cniVersion": "{{ contiv_cni_version }}",
"name": "contiv-net",
"type": "contivk8s"
}
contiv_k8s_config: |-
{
"K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
"K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
"K8S_KEY": "",
"K8S_CERT": "",
"K8S_TOKEN": "",
"SVC_SUBNET": "{{ kube_service_addresses }}"
}

View file

@@ -1,38 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-etcd-proxy
namespace: kube-system
labels:
k8s-app: contiv-etcd-proxy
spec:
selector:
matchLabels:
k8s-app: contiv-etcd-proxy
template:
metadata:
labels:
k8s-app: contiv-etcd-proxy
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: DoesNotExist
containers:
- name: contiv-etcd-proxy
image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
env:
- name: ETCD_LISTEN_CLIENT_URLS
value: 'http://127.0.0.1:{{ contiv_etcd_listen_port }}'
- name: ETCD_PROXY
value: "on"
- name: ETCD_INITIAL_CLUSTER
value: '{{ contiv_etcd_endpoints }}'

View file

@@ -1,65 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-etcd
namespace: kube-system
labels:
k8s-app: contiv-etcd
spec:
selector:
matchLabels:
k8s-app: contiv-etcd
template:
metadata:
labels:
k8s-app: contiv-etcd
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
initContainers:
- name: contiv-etcd-init
image: {{ contiv_etcd_init_image_repo }}:{{ contiv_etcd_init_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env:
- name: ETCD_INIT_ARGSFILE
value: '{{ contiv_etcd_conf_dir }}/contiv-etcd-args'
- name: ETCD_INIT_LISTEN_PORT
value: '{{ contiv_etcd_listen_port }}'
- name: ETCD_INIT_PEER_PORT
value: '{{ contiv_etcd_peer_port }}'
- name: ETCD_INIT_CLUSTER
value: '{{ contiv_etcd_endpoints }}'
- name: ETCD_INIT_DATA_DIR
value: '{{ contiv_etcd_data_dir }}'
volumeMounts:
- name: contiv-etcd-conf-dir
mountPath: {{ contiv_etcd_conf_dir }}
containers:
- name: contiv-etcd
image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
command:
- sh
- -c
- "/usr/local/bin/etcd $(cat $ETCD_INIT_ARGSFILE)"
env:
- name: ETCD_INIT_ARGSFILE
value: {{ contiv_etcd_conf_dir }}/contiv-etcd-args
volumeMounts:
- name: contiv-etcd-conf-dir
mountPath: {{ contiv_etcd_conf_dir }}
- name: contiv-etcd-data-dir
mountPath: {{ contiv_etcd_data_dir }}
volumes:
- name: contiv-etcd-data-dir
hostPath:
path: {{ contiv_etcd_data_dir }}
- name: contiv-etcd-conf-dir
hostPath:
path: {{ contiv_etcd_conf_dir }}

View file

@@ -1,27 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: contiv-netmaster
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- nodes
- namespaces
- networkpolicies
verbs:
- get
- watch
- list
- update
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use

View file

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: contiv-netmaster
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: contiv-netmaster
subjects:
- kind: ServiceAccount
name: contiv-netmaster
namespace: kube-system

View file

@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netmaster
namespace: kube-system

View file

@@ -1,71 +0,0 @@
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-netmaster
namespace: kube-system
labels:
k8s-app: contiv-netmaster
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: contiv-netmaster
template:
metadata:
name: contiv-netmaster
namespace: kube-system
labels:
k8s-app: contiv-netmaster
spec:
priorityClassName: system-node-critical
# The netmaster must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- operator: Exists
serviceAccountName: contiv-netmaster
containers:
- name: contiv-netmaster
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
env:
- name: CONTIV_ROLE
value: netmaster
- name: CONTIV_NETMASTER_MODE
value: kubernetes
- name: CONTIV_NETMASTER_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
- name: CONTIV_NETMASTER_FORWARD_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_fwdmode
- name: CONTIV_NETMASTER_NET_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmode
- name: CONTIV_NETMASTER_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmaster_loglevel
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
# Used by contiv-netmaster
- name: var-contiv
hostPath:
path: /var/contiv

View file

@@ -1,29 +0,0 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: contiv-netplugin
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- endpoints
- nodes
- namespaces
- networkpolicies
- pods
- services
verbs:
- watch
- list
- update
- get
- apiGroups:
- policy
resourceNames:
- privileged
resources:
- podsecuritypolicies
verbs:
- use

View file

@@ -1,12 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: contiv-netplugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: contiv-netplugin
subjects:
- kind: ServiceAccount
name: contiv-netplugin
namespace: kube-system

View file

@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: contiv-netplugin
namespace: kube-system

View file

@@ -1,128 +0,0 @@
---
# This manifest installs contiv-netplugin container, as well
# as the Contiv CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-netplugin
namespace: kube-system
labels:
k8s-app: contiv-netplugin
spec:
selector:
matchLabels:
k8s-app: contiv-netplugin
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: contiv-netplugin
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
serviceAccountName: contiv-netplugin
initContainers:
- name: contiv-netplugin-init
image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
env:
- name: CONTIV_ROLE
value: netplugin
- name: CONTIV_MODE
value: kubernetes
- name: CONTIV_K8S_CONFIG
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_k8s_config
- name: CONTIV_CNI_CONFIG
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_cni_config
volumeMounts:
- mountPath: /var/contiv
name: var-contiv
readOnly: false
- mountPath: /etc/cni/net.d/
name: etc-cni-dir
readOnly: false
- name: contiv-cni
image: {{ contiv_image_repo }}:{{ contiv_version }}
command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-bin-dir
readOnly: false
containers:
# Runs netplugin container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: contiv-netplugin
image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
env:
- name: VLAN_IF
value: {{ contiv_vlan_interface }}
- name: CONTIV_NETPLUGIN_VLAN_UPLINKS
value: {{ contiv_vlan_interface }}
- name: CONTIV_NETPLUGIN_VXLAN_PORT
value: "{{ contiv_vxlan_port }}"
- name: CONTIV_ROLE
value: netplugin
- name: CONTIV_NETPLUGIN_MODE
value: kubernetes
- name: CONTIV_NETPLUGIN_VTEP_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_etcd
- name: CONTIV_NETPLUGIN_FORWARD_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_fwdmode
- name: CONTIV_NETPLUGIN_NET_MODE
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netmode
- name: CONTIV_NETPLUGIN_LOG_LEVEL
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_netplugin_loglevel
resources:
requests:
cpu: 250m
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/run
name: var-run
readOnly: false
- mountPath: /var/contiv
name: var-contiv
readOnly: false
volumes:
# Used by contiv-netplugin
- name: var-run
hostPath:
path: /var/run
- name: var-contiv
hostPath:
path: /var/contiv
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: etc-cni-dir
hostPath:
path: /etc/cni/net.d/

View file

@@ -1,79 +0,0 @@
---
# This manifest deploys the contiv-ovs pod.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: contiv-ovs
namespace: kube-system
labels:
k8s-app: contiv-ovs
spec:
selector:
matchLabels:
k8s-app: contiv-ovs
template:
metadata:
labels:
k8s-app: contiv-ovs
spec:
priorityClassName: system-node-critical
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
hostPID: true
tolerations:
- operator: Exists
containers:
# Runs ovs containers on each Kubernetes node.
- name: contiv-ovsdb-server
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/scripts/start-ovsdb-server.sh"]
securityContext:
privileged: false
# Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
env:
- name: OVSDBSERVER_EXTRA_FLAGS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_ovsdb_server_extra_flags
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /var/run
name: var-run
readOnly: false
- name: contiv-ovs-vswitchd
image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
command: ["/scripts/start-ovs-vswitchd.sh"]
securityContext:
privileged: true
# Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
env:
- name: OVSVSWITCHD_EXTRA_FLAGS
valueFrom:
configMapKeyRef:
name: contiv-config
key: contiv_ovs_vswitchd_extra_flags
volumeMounts:
- mountPath: /etc/openvswitch
name: etc-openvswitch
readOnly: false
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run
name: var-run
readOnly: false
volumes:
# Used by contiv-ovs
- name: etc-openvswitch
hostPath:
path: /etc/openvswitch
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run
hostPath:
path: /var/run

View file

@@ -1,23 +0,0 @@
#!/bin/bash
set -euo pipefail
PREFIX="/var/contiv"
KEY_PATH="$PREFIX/auth_proxy_key.pem"
CERT_PATH="$PREFIX/auth_proxy_cert.pem"
# if both files exist, just exit
if [[ -f $KEY_PATH && -f $CERT_PATH ]]; then
exit 0
fi
mkdir -p "$PREFIX"
rm -f $KEY_PATH
rm -f $CERT_PATH
openssl genrsa -out $KEY_PATH {{certificates_key_size}} >/dev/null 2>&1
openssl req -new -x509 -sha256 -days {{certificates_duration}} \
-key $KEY_PATH \
-out $CERT_PATH \
-subj "/C=US/ST=CA/L=San Jose/O=CPSG/OU=IT Department/CN=auth-local.cisco.com"

View file

@@ -35,11 +35,6 @@ dependencies:
tags:
- macvlan
- role: network_plugin/contiv
when: kube_network_plugin == 'contiv'
tags:
- contiv
- role: network_plugin/kube-ovn
when: kube_network_plugin == 'kube-ovn'
tags:

View file

@@ -1,11 +1,4 @@
---
- name: reset | include file with pre-reset tasks specific to the network_plugin if exists
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/pre-reset.yml') | realpath }}"
when:
- kube_network_plugin in ['contiv']
tags:
- network
- name: reset | stop services
service:
name: "{{ item }}"
@@ -268,9 +261,6 @@
- "{{ bin_dir }}/netctl"
- /var/lib/cni
- /etc/vault
- /etc/contiv
- /var/contiv
- /run/contiv
- /etc/openvswitch
- /run/openvswitch
- /var/lib/kube-router
@@ -306,7 +296,7 @@
- name: reset | include file with reset tasks specific to the network_plugin if exists
include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
when:
- kube_network_plugin in ['flannel', 'cilium', 'contiv', 'kube-router', 'calico']
- kube_network_plugin in ['flannel', 'cilium', 'kube-router', 'calico']
tags:
- network

View file

@@ -1,9 +0,0 @@
---
# Instance settings
cloud_image: ubuntu-1604
mode: separate
# Kubespray settings
kube_network_plugin: contiv
deploy_netchecker: true
dns_min_replicas: 1

View file

@@ -94,4 +94,4 @@ for f in files:
operating_system = x.group(1)
data.set(container_manager=container_manager, network_plugin=network_plugin, operating_system=operating_system)
#print(data.markdown())
print(data.jinja())
print(data.jinja())

View file

@@ -127,9 +127,6 @@
with_items:
- kube-router
- flannel
- contiv-ovs
- contiv-netplugin
- contiv-netmaster
- canal-node
- calico-node
- cilium