Merge remote-tracking branch 'upstream/master' into fix/ubuntu-xenial-resolv-conf
commit 4cbd97667d
39 changed files with 502 additions and 106 deletions
@@ -111,10 +111,10 @@ Supported Components
 - [cilium](https://github.com/cilium/cilium) v1.2.0
 - [contiv](https://github.com/contiv/install) v1.1.7
 - [flanneld](https://github.com/coreos/flannel) v0.10.0
-- [weave](https://github.com/weaveworks/weave) v2.4.0
+- [weave](https://github.com/weaveworks/weave) v2.4.1
 - Application
 - [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) v2.1.0-k8s1.11
-- [cert-manager](https://github.com/jetstack/cert-manager) v0.4.1
+- [cert-manager](https://github.com/jetstack/cert-manager) v0.5.0
 - [coredns](https://github.com/coredns/coredns) v1.2.2
 - [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.19.0

@@ -22,8 +22,6 @@ export TF_VAR_AWS_SECRET_ACCESS_KEY="xxx"
 export TF_VAR_AWS_SSH_KEY_NAME="yyy"
 export TF_VAR_AWS_DEFAULT_REGION="zzz"
 ```
-- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
-
 - Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
 - Create an AWS EC2 SSH Key
 - Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials

@@ -54,16 +54,18 @@ The default configuration uses VXLAN to create an overlay. Two networks are crea
 
 You can change the default network configuration by overriding the `contiv_networks` variable.
 
-The default forward mode is set to routing:
+The default forward mode is set to routing and the default network mode is vxlan:
 
 ```yaml
 contiv_fwd_mode: routing
+contiv_net_mode: vxlan
 ```
 
 The following is an example of how you can use VLAN instead of VXLAN:
 
 ```yaml
 contiv_fwd_mode: bridge
+contiv_net_mode: vlan
 contiv_vlan_interface: eth0
 contiv_networks:
   - name: default-net

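As a usage sketch of the two modes documented above, a group_vars override switching a cluster to VLAN mode might look like the following; the `subnet` and `gateway` keys are assumptions for illustration and do not appear in this diff:

```yaml
# Hypothetical group_vars/k8s-cluster.yml override (subnet/gateway keys assumed)
contiv_fwd_mode: bridge
contiv_net_mode: vlan
contiv_vlan_interface: eth0
contiv_networks:
  - name: default-net
    subnet: "10.245.0.0/16"   # illustrative value
    gateway: "10.245.0.1"     # illustrative value
```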
@@ -43,6 +43,13 @@ bin_dir: /usr/local/bin
 ## The subnets of each nodes will be distributed by the datacenter router
 #peer_with_router: false
 
+## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
+## In this case, you may need to peer with an uplink
+## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
+#contiv_peer_with_uplink_leaf: false
+#contiv_global_as: "65002"
+#contiv_global_neighbor_as: "500"
+
 ## Upstream dns servers used by dnsmasq
 #upstream_dns_servers:
 # - 8.8.8.8

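The NB comment above, combined with the `Contiv | Configure peering with router(s)` task added later in this diff, implies per-host data shaped roughly as follows; all addresses are illustrative assumptions:

```yaml
# Hypothetical host_vars for a node that peers with its uplink leaf (addresses assumed)
contiv_peer_with_uplink_leaf: true
contiv:
  router_ip: "192.168.1.2"   # illustrative router IP for this node
  neighbor: "192.168.1.1"    # illustrative uplink leaf address
  # "as" and "neighbor_as" may be omitted; they fall back to
  # contiv_global_as and contiv_global_neighbor_as
```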
@@ -46,9 +46,9 @@ flannel_version: "v0.10.0"
 flannel_cni_version: "v0.3.0"
 
 vault_version: 0.10.1
-weave_version: "2.4.0"
+weave_version: "2.4.1"
 pod_infra_version: 3.1
-contiv_version: 1.1.7
+contiv_version: 1.2.1
 cilium_version: "v1.2.0"
 
 # Download URLs

@@ -98,16 +98,20 @@ netcheck_agent_img_repo: "mirantis/k8s-netchecker-agent"
 netcheck_agent_tag: "{{ netcheck_version }}"
 netcheck_server_img_repo: "mirantis/k8s-netchecker-server"
 netcheck_server_tag: "{{ netcheck_version }}"
-weave_kube_image_repo: "weaveworks/weave-kube"
+weave_kube_image_repo: "docker.io/weaveworks/weave-kube"
 weave_kube_image_tag: "{{ weave_version }}"
-weave_npc_image_repo: "weaveworks/weave-npc"
+weave_npc_image_repo: "docker.io/weaveworks/weave-npc"
 weave_npc_image_tag: "{{ weave_version }}"
 contiv_image_repo: "contiv/netplugin"
 contiv_image_tag: "{{ contiv_version }}"
+contiv_init_image_repo: "contiv/netplugin-init"
+contiv_init_image_tag: "latest"
 contiv_auth_proxy_image_repo: "contiv/auth_proxy"
 contiv_auth_proxy_image_tag: "{{ contiv_version }}"
 contiv_etcd_init_image_repo: "ferest/etcd-initer"
 contiv_etcd_init_image_tag: latest
+contiv_ovs_image_repo: "contiv/ovs"
+contiv_ovs_image_tag: "latest"
 cilium_image_repo: "docker.io/cilium/cilium"
 cilium_image_tag: "{{ cilium_version }}"
 nginx_image_repo: nginx

@@ -164,7 +168,7 @@ ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/ngin
 ingress_nginx_controller_image_tag: "0.19.0"
 ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"
 ingress_nginx_default_backend_image_tag: "1.4"
-cert_manager_version: "v0.4.1"
+cert_manager_version: "v0.5.0"
 cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
 cert_manager_controller_image_tag: "{{ cert_manager_version }}"

@@ -1,21 +1,25 @@
 ---
-- name: install | Copy etcd binary from download dir
-  shell: |
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcd" "{{ bin_dir }}/etcd"
-    rsync -piu "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/etcdctl" "{{ bin_dir }}/etcdctl"
+- name: install | Copy etcd and etcdctl binary from download dir
+  synchronize:
+    src: "{{ local_release_dir }}/etcd-{{ etcd_version }}-linux-amd64/{{ item }}"
+    dest: "{{ bin_dir }}/{{ item }}"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
+  with_items:
+    - "etcd"
+    - "etcdctl"
   when: etcd_cluster_setup
 
-- name: install | Set etcd binary permissions
+- name: install | Set etcd and etcdctl binary permissions
   file:
-    path: "{{ bin_dir }}/etcd"
-    mode: "0755"
-    state: file
-  when: etcd_cluster_setup
-
-- name: install | Set etcdctl binary permissions
-  file:
-    path: "{{ bin_dir }}/etcdctl"
+    path: "{{ bin_dir }}/{{ item }}"
     mode: "0755"
     state: file
+  with_items:
+    - "etcd"
+    - "etcdctl"
   when: etcd_cluster_setup

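For context on the pattern introduced above: `synchronize` delegated to the target host performs the copy locally on that host, replacing the shelled-out rsync while keeping rsync's update-only semantics. A minimal standalone sketch, with assumed paths:

```yaml
# Hypothetical playbook showing the delegated-synchronize pattern (paths assumed)
- hosts: etcd
  tasks:
    - name: Copy a staged binary into place on the same host
      synchronize:
        src: /tmp/releases/etcd-v3.2.18-linux-amd64/etcd  # assumed staging path
        dest: /usr/local/bin/etcd
        compress: no
        perms: yes
        owner: no
        group: no
      delegate_to: "{{ inventory_hostname }}"  # run the copy on the target host itself
```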
@@ -68,6 +68,7 @@
     {% if tiller_max_history is defined %} --history-max={{ tiller_max_history }}{% endif %}
     {% if tiller_enable_tls %} --tiller-tls --tiller-tls-verify --tiller-tls-cert={{ tiller_tls_cert }} --tiller-tls-key={{ tiller_tls_key }} --tls-ca-cert={{ tiller_tls_ca_cert }} {% endif %}
     {% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
+    --debug --dry-run
     | kubectl apply -f -
   changed_when: false
   when:

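Context for the added flags: with Helm v2, `helm init --dry-run --debug` prints the rendered Tiller manifest instead of installing it, so piping the output into `kubectl apply -f -` lets kubectl create and own the resources. A minimal sketch of the same render-then-apply pattern (image tag assumed):

```yaml
# Hypothetical standalone task using the render-then-apply pattern (Helm v2 semantics)
- name: Render the Tiller manifest with helm and create it with kubectl
  shell: >-
    {{ bin_dir }}/helm init --upgrade
    --tiller-image=gcr.io/kubernetes-helm/tiller:v2.9.1
    --debug --dry-run
    | {{ bin_dir }}/kubectl apply -f -
  changed_when: false
```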
@@ -5,3 +5,4 @@ metadata:
   name: {{ cert_manager_namespace }}
   labels:
     name: {{ cert_manager_namespace }}
+    certmanager.k8s.io/disable-validation: "true"

@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 rules:

|
@ -13,12 +13,7 @@ rules:
|
||||||
resources: ["certificates", "issuers", "clusterissuers"]
|
resources: ["certificates", "issuers", "clusterissuers"]
|
||||||
verbs: ["*"]
|
verbs: ["*"]
|
||||||
- apiGroups: [""]
|
- apiGroups: [""]
|
||||||
# TODO: remove endpoints once 0.4 is released. We include it here in case
|
resources: ["configmaps", "secrets", "events", "services", "pods"]
|
||||||
# users use the 'master' version of the Helm chart with a 0.2.x release of
|
|
||||||
# cert-manager that still performs leader election with Endpoint resources.
|
|
||||||
# We advise users don't do this, but some will anyway and this will reduce
|
|
||||||
# friction.
|
|
||||||
resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
|
|
||||||
verbs: ["*"]
|
verbs: ["*"]
|
||||||
- apiGroups: ["extensions"]
|
- apiGroups: ["extensions"]
|
||||||
resources: ["ingresses"]
|
resources: ["ingresses"]
|
||||||
|
|
|
@@ -5,7 +5,7 @@ metadata:
   name: cert-manager
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 roleRef:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: certificates.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: clusterissuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -3,9 +3,11 @@ apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
   name: issuers.certmanager.k8s.io
+  annotations:
+    "helm.sh/hook": crd-install
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -6,7 +6,7 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller
 spec:

@@ -6,6 +6,6 @@ metadata:
   namespace: {{ cert_manager_namespace }}
   labels:
     app: cert-manager
-    chart: cert-manager-v0.4.1
+    chart: cert-manager-v0.5.0
     release: cert-manager
     heritage: Tiller

@@ -33,6 +33,46 @@
   when: "contiv_global_config.networkInfraType != contiv_fabric_mode"
   run_once: true
 
+- name: Contiv | Set peer hostname
+  set_fact:
+    contiv_peer_hostname: >-
+      {%- if override_system_hostname|default(true) -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['inventory_hostname']}) }}
+      {%- else -%}
+      {{ contiv_peer_hostname|default({})|combine({item: hostvars[item]['ansible_fqdn']}) }}
+      {%- endif -%}
+  with_items: "{{ groups['k8s-cluster'] }}"
+  run_once: true
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Get BGP configuration
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp ls --json
+  register: bgp_config
+  run_once: true
+  changed_when: false
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+
+- name: Contiv | Configure peering with router(s)
+  command: |
+    {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \
+      bgp create {{ item.value }} \
+      --router-ip="{{ hostvars[item.key]['contiv']['router_ip'] }}" \
+      --as="{{ hostvars[item.key]['contiv']['as'] | default(contiv_global_as) }}" \
+      --neighbor-as="{{ hostvars[item.key]['contiv']['neighbor_as'] | default(contiv_global_neighbor_as) }}" \
+      --neighbor="{{ hostvars[item.key]['contiv']['neighbor'] }}"
+  run_once: true
+  with_dict: "{{ contiv_peer_hostname }}"
+  when:
+    - contiv_fwd_mode == 'routing'
+    - contiv_peer_with_uplink_leaf
+    - bgp_config.stdout|from_json|length == 0 or not item.value in bgp_config.stdout|from_json|map(attribute='key')|list
+
 - name: Contiv | Get existing networks
   command: |
     {{ bin_dir }}/netctl --netmaster "http://127.0.0.1:{{ contiv_netmaster_port }}" \

@@ -9,7 +9,6 @@
     filename: "{{ contiv_config_dir }}/{{ item.item.file }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ contiv_manifests_results.results }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
 
 - import_tasks: configure.yml

@@ -10,8 +10,15 @@
   when: kube_encrypt_secret_data
 
 - name: install | Copy kubectl binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubectl"
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubectl"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
   changed_when: false
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - kubectl

0 roles/kubernetes/node/tasks/install (Normal file)

@@ -11,6 +11,7 @@
     src: "{{ local_release_dir }}/kubeadm"
     dest: "{{ bin_dir }}/kubeadm"
     compress: no
+    perms: yes
     owner: no
     group: no
   delegate_to: "{{ inventory_hostname }}"

0 roles/kubernetes/node/tasks/install_ (Normal file)

@@ -1,11 +1,18 @@
 ---
 
 - name: install | Copy kubelet binary from download dir
-  command: rsync -piu "{{ local_release_dir }}/hyperkube" "{{ bin_dir }}/kubelet"
-  changed_when: false
+  synchronize:
+    src: "{{ local_release_dir }}/hyperkube"
+    dest: "{{ bin_dir }}/kubelet"
+    compress: no
+    perms: yes
+    owner: no
+    group: no
+  delegate_to: "{{ inventory_hostname }}"
   tags:
     - hyperkube
     - upgrade
+  notify: restart kubelet
 
 - name: install | Set kubelet binary permissions
   file:

@@ -15,7 +22,6 @@
   tags:
     - hyperkube
     - upgrade
-  notify: restart kubelet
 
 - name: install | Copy socat wrapper for Container Linux
   command: "{{ docker_bin_dir }}/docker run --rm -v {{ bin_dir }}:/opt/bin {{ install_socat_image_repo }}:{{ install_socat_image_tag }}"

@@ -303,6 +303,11 @@ weave_mode_seed: false
 weave_seed: uninitialized
 weave_peers: uninitialized
 
+# Contiv L3 BGP Mode
+contiv_peer_with_uplink_leaf: false
+contiv_global_as: "65002"
+contiv_global_neighbor_as: "500"
+
 ## Set no_proxy to all assigned cluster IPs and hostnames
 no_proxy: >-
   {%- if http_proxy is defined or https_proxy is defined %}

@@ -6,8 +6,10 @@ contiv_etcd_data_dir: "/var/lib/etcd/contiv-data"
 contiv_netmaster_port: 9999
 contiv_cni_version: 0.1.0
 
+# No need to download it by default, but must be defined
 contiv_etcd_image_repo: "{{ etcd_image_repo }}"
 contiv_etcd_image_tag: "{{ etcd_image_tag }}"
+
 contiv_etcd_listen_port: 6666
 contiv_etcd_peer_port: 6667
 contiv_etcd_endpoints: |-

@@ -26,9 +28,21 @@ contiv_fwd_mode: routing
 # Fabric mode: aci, aci-opflex or default
 contiv_fabric_mode: default
 
+# Defaut netmode: vxlan or vlan
+contiv_net_mode: vxlan
+
 # Dataplane interface
 contiv_vlan_interface: ""
 
+# Default loglevels are INFO
+contiv_netmaster_loglevel: "WARN"
+contiv_netplugin_loglevel: "WARN"
+contiv_ovsdb_server_loglevel: "warn"
+contiv_ovs_vswitchd_loglevel: "warn"
+
+# VxLAN port
+contiv_vxlan_port: 4789
+
 # Default network configuration
 contiv_networks:
   - name: contivh1

10 roles/network_plugin/contiv/files/contiv-cleanup.sh (Normal file)

@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+echo "Starting cleanup"
+ovs-vsctl list-br | grep contiv | xargs -I % ovs-vsctl del-br %
+for p in $(ifconfig | grep vport | awk '{print $1}');
+do
+    ip link delete $p type veth
+done
+touch /tmp/cleanup.done
+sleep 60

@@ -16,8 +16,25 @@
   with_items:
     - "{{ contiv_etcd_conf_dir }}"
     - "{{ contiv_etcd_data_dir }}"
+  when: inventory_hostname in groups['kube-master']
 
-- set_fact:
+- name: Contiv | Workaround https://github.com/contiv/netplugin/issues/1152
+  set_fact:
+    kube_apiserver_endpoint_for_contiv: |-
+      {% if not is_kube_master and loadbalancer_apiserver_localhost -%}
+      https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
+      {%- elif loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
+      https://{{ apiserver_loadbalancer_domain_name|default('lb-apiserver.kubernetes.local') }}
+      {%- if loadbalancer_apiserver.port|string != "443" -%}
+      :{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+      {%- endif -%}
+      {%- else -%}
+      https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+      {%- endif %}
+  when: inventory_hostname in groups['kube-master']
+
+- name: Contiv | Set necessary facts
+  set_fact:
     contiv_config_dir: "{{ contiv_config_dir }}"
     contiv_enable_api_proxy: "{{ contiv_enable_api_proxy }}"
     contiv_fabric_mode: "{{ contiv_fabric_mode }}"

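A worked example of how the `kube_apiserver_endpoint_for_contiv` fact above renders, under assumed inputs:

```yaml
# Assumed inputs (illustrative):
#   is_kube_master: false
#   loadbalancer_apiserver_localhost: false
#   loadbalancer_apiserver: { address: "10.0.0.5", port: 8443 }
#   apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
# Rendered fact:
#   kube_apiserver_endpoint_for_contiv: "https://lb-apiserver.kubernetes.local:8443"
# With port 443 the ":443" suffix is omitted; without a loadbalancer the fact
# falls back to "https://{{ first_kube_master }}:{{ kube_apiserver_port }}".
```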
@@ -26,22 +43,26 @@
     contiv_networks: "{{ contiv_networks }}"
     contiv_manifests:
       - {name: contiv-config, file: contiv-config.yml, type: configmap}
+      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
+      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
+      - {name: contiv-ovs, file: contiv-ovs.yml, type: daemonset}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netmaster, file: contiv-netmaster-clusterrole.yml, type: clusterrole}
       - {name: contiv-netmaster, file: contiv-netmaster-serviceaccount.yml, type: serviceaccount}
+      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrolebinding.yml, type: clusterrolebinding}
       - {name: contiv-netplugin, file: contiv-netplugin-clusterrole.yml, type: clusterrole}
       - {name: contiv-netplugin, file: contiv-netplugin-serviceaccount.yml, type: serviceaccount}
-      - {name: contiv-etcd, file: contiv-etcd.yml, type: daemonset}
-      - {name: contiv-etcd-proxy, file: contiv-etcd-proxy.yml, type: daemonset}
       - {name: contiv-netplugin, file: contiv-netplugin.yml, type: daemonset}
-      - {name: contiv-netmaster, file: contiv-netmaster.yml, type: daemonset}
+  when: inventory_hostname in groups['kube-master']
 
 - set_fact:
     contiv_manifests: |-
       {% set _ = contiv_manifests.append({"name": "contiv-api-proxy", "file": "contiv-api-proxy.yml", "type": "daemonset"}) %}
       {{ contiv_manifests }}
-  when: contiv_enable_api_proxy
+  when:
+    - contiv_enable_api_proxy
+    - inventory_hostname in groups['kube-master']
 
 - name: Contiv | Create /var/contiv
   file:

@@ -55,21 +76,23 @@
     mode: 0755
     owner: root
     group: root
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Install all Kubernetes resources
   template:
     src: "{{ item.file }}.j2"
     dest: "{{ contiv_config_dir }}/{{ item.file }}"
   with_items: "{{ contiv_manifests }}"
-  delegate_to: "{{ groups['kube-master'][0] }}"
-  run_once: true
   register: contiv_manifests_results
+  when: inventory_hostname in groups['kube-master']
 
 - name: Contiv | Generate contiv-api-proxy certificates
   script: generate-certificate.sh
   args:
     creates: /var/contiv/auth_proxy_key.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -81,7 +104,9 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
   delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true

@@ -92,9 +117,11 @@
   with_items:
     - auth_proxy_key.pem
     - auth_proxy_cert.pem
-  when: "inventory_hostname != groups['kube-master'][0]
-         and inventory_hostname in groups['kube-master']
-         and contiv_enable_api_proxy and contiv_generate_certificate"
+  when:
+    - inventory_hostname != groups['kube-master'][0]
+    - inventory_hostname in groups['kube-master']
+    - contiv_enable_api_proxy
+    - contiv_generate_certificate
 
 - name: Contiv | Copy cni plugins from hyperkube
   command: "{{ docker_bin_dir }}/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /bin/bash -c '/bin/cp -fa /opt/cni/bin/* /cnibindir/'"

66 roles/network_plugin/contiv/tasks/pre-reset.yml (Normal file)

@@ -0,0 +1,66 @@
+---
+- name: reset | Check that kubectl is still here
+  stat:
+    path: "{{ bin_dir }}/kubectl"
+  register: contiv_kubectl
+
+- name: reset | Delete contiv netplugin and netmaster daemonsets
+  kube:
+    name: "{{ item }}"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: absent
+  with_items:
+    - contiv-netplugin
+    - contiv-netmaster
+  register: contiv_cleanup_deletion
+  tags:
+    - network
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Copy contiv temporary cleanup script
+  copy:
+    src: ../files/contiv-cleanup.sh  # Not in role_path so we must trick...
+    dest: /opt/cni/bin/cleanup
+    owner: root
+    group: root
+    mode: 0750
+  when:
+    - contiv_kubectl.stat.exists
+
+- name: reset | Lay down contiv cleanup template
+  template:
+    src: ../templates/contiv-cleanup.yml.j2  # Not in role_path so we must trick...
+    dest: "{{ kube_config_dir }}/contiv-cleanup.yml"  # kube_config_dir is used here as contiv_config_dir is not necessarily set at reset
+  register: contiv_cleanup_manifest
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+
+- name: reset | Start contiv cleanup resources
+  kube:
+    name: "contiv-cleanup"
+    namespace: "kube-system"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "ds"
+    state: latest
+    filename: "{{ kube_config_dir }}/contiv-cleanup.yml"
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]
+  ignore_errors: true
+
+- name: reset | Wait until contiv cleanup is done
+  command: "{{ bin_dir }}/kubectl -n kube-system get ds contiv-cleanup -o jsonpath='{.status.numberReady}'"
+  register: cleanup_done_all_nodes
+  until: cleanup_done_all_nodes.stdout|int == groups['k8s-cluster']|length
+  retries: 5
+  delay: 5
+  ignore_errors: true
+  changed_when: false
+  when:
+    - contiv_kubectl.stat.exists
+    - inventory_hostname == groups['kube-master'][0]

9 roles/network_plugin/contiv/tasks/reset.yml (Normal file)

@@ -0,0 +1,9 @@
+---
+- name: reset | check contiv vxlan_sys network device
+  stat:
+    path: "/sys/class/net/vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  register: contiv_vxlan_sys
+
+- name: reset | remove the vxlan_sys network device created by contiv
+  command: "ip link del vxlan_sys_{{ contiv_vxlan_port | default('4789') }}"
+  when: contiv_vxlan_sys.stat.exists

@@ -35,16 +35,19 @@ spec:
         - --listen-address=0.0.0.0:{{ contiv_api_proxy_port }}
         - --tls-key-file=/var/contiv/auth_proxy_key.pem
         - --tls-certificate=/var/contiv/auth_proxy_cert.pem
+        - --data-store-driver=$(STORE_DRIVER)
        - --data-store-address=$(CONTIV_ETCD)
        - --netmaster-address=127.0.0.1:{{ contiv_netmaster_port }}
        env:
          - name: NO_NETMASTER_STARTUP_CHECK
            value: "0"
+         - name: STORE_DRIVER
+           value: etcd
          - name: CONTIV_ETCD
            valueFrom:
              configMapKeyRef:
                name: contiv-config
-               key: cluster_store
+               key: contiv_etcd
        securityContext:
          privileged: false
        volumeMounts:

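A note on the `$(STORE_DRIVER)` syntax introduced above: Kubernetes expands `$(NAME)` references in container args from the container's own env entries, so the two additions work as a pair:

```yaml
# Minimal illustration of Kubernetes $(VAR) expansion in args (values from the diff)
args:
  - --data-store-driver=$(STORE_DRIVER)  # expands to --data-store-driver=etcd
env:
  - name: STORE_DRIVER
    value: etcd
```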
57 roles/network_plugin/contiv/templates/contiv-cleanup.yml.j2 (Normal file)

@@ -0,0 +1,57 @@
+---
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-cleanup
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-cleanup
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-cleanup
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-cleanup
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      serviceAccountName: contiv-netplugin
+      containers:
+        - name: contiv-ovs-cleanup
+          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+          command: ["/opt/cni/bin/cleanup"]
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - mountPath: /etc/openvswitch
+              name: etc-openvswitch
+              readOnly: false
+            - mountPath: /var/run
+              name: var-run
+              readOnly: false
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+              readOnly: false
+          readinessProbe:
+            exec:
+              command:
+                - cat
+                - /tmp/cleanup.done
+            initialDelaySeconds: 3
+            periodSeconds: 3
+            successThreshold: 1
+      volumes:
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: var-run
+          hostPath:
+            path: /var/run
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin

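How the cleanup pieces fit together: contiv-cleanup.sh (added earlier in this diff) tears down the OVS bridges, touches /tmp/cleanup.done, then sleeps; the readinessProbe above flips the pod to Ready once that file exists; and the pre-reset tasks poll `.status.numberReady` until it matches the cluster size before removing the DaemonSet.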
@@ -7,20 +7,22 @@ metadata:
   name: contiv-config
   namespace: kube-system
 data:
-  # The location of your cluster store. This is set to the
-  # avdertise-client value below from the contiv-etcd service.
-  # Change it to an external etcd/consul instance if required.
-  cluster_store: "etcd://127.0.0.1:{{ contiv_etcd_listen_port }}"
-  # The CNI network configuration to install on each node.
-  cni_config: |-
+  contiv_netmaster_loglevel: {{ contiv_netmaster_loglevel }}
+  contiv_netplugin_loglevel: {{ contiv_netplugin_loglevel }}
+  contiv_ovsdb_server_extra_flags: "--verbose={{ contiv_ovsdb_server_loglevel }}"
+  contiv_ovs_vswitchd_extra_flags: "--verbose={{ contiv_ovs_vswitchd_loglevel }}"
+  contiv_fwdmode: {{ contiv_fwd_mode }}
+  contiv_netmode: {{ contiv_net_mode }}
+  contiv_etcd: "http://127.0.0.1:{{ contiv_etcd_listen_port }}"
+  contiv_cni_config: |-
     {
       "cniVersion": "{{ contiv_cni_version }}",
       "name": "contiv-net",
       "type": "contivk8s"
     }
-  config: |-
+  contiv_k8s_config: |-
     {
-      "K8S_API_SERVER": "{{ kube_apiserver_endpoint }}",
+      "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
       "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
       "K8S_KEY": "",
       "K8S_CERT": "",

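With the role defaults added earlier in this diff, the ConfigMap above renders as follows (a worked example using the default values, not an excerpt from the commit):

```yaml
# Rendered data with the new defaults (contiv_etcd_listen_port: 6666)
data:
  contiv_netmaster_loglevel: WARN
  contiv_netplugin_loglevel: WARN
  contiv_ovsdb_server_extra_flags: "--verbose=warn"
  contiv_ovs_vswitchd_extra_flags: "--verbose=warn"
  contiv_fwdmode: routing
  contiv_netmode: vxlan
  contiv_etcd: "http://127.0.0.1:6666"
```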
@@ -19,6 +19,8 @@ spec:
     spec:
       hostNetwork: true
       hostPID: true
+      nodeSelector:
+        node-role.kubernetes.io/node: "true"
       containers:
         - name: contiv-etcd-proxy
           image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}

@@ -13,6 +13,7 @@ rules:
       - namespaces
       - networkpolicies
     verbs:
+      - get
      - watch
      - list
      - update

@@ -1,4 +1,4 @@
-# This manifest deploys the Contiv API Server on Kubernetes.
+---
 kind: DaemonSet
 apiVersion: extensions/v1beta1
 metadata:

@@ -31,20 +31,31 @@ spec:
       containers:
         - name: contiv-netmaster
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -m
-            - -pkubernetes
           env:
-            - name: CONTIV_ETCD
+            - name: CONTIV_ROLE
+              value: netmaster
+            - name: CONTIV_NETMASTER_MODE
+              value: kubernetes
+            - name: CONTIV_NETMASTER_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
+                  key: contiv_etcd
-            - name: CONTIV_CONFIG
+            - name: CONTIV_NETMASTER_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_fwdmode
+            - name: CONTIV_NETMASTER_NET_MODE
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmode
+            - name: CONTIV_NETMASTER_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netmaster_loglevel
           securityContext:
             privileged: true
           volumeMounts:

@@ -1,3 +1,4 @@
+---
 # This manifest installs contiv-netplugin container, as well
 # as the Contiv CNI plugins and network config on
 # each master and worker node in a Kubernetes cluster.

@@ -27,73 +28,99 @@ spec:
         - key: node-role.kubernetes.io/master
           effect: NoSchedule
       serviceAccountName: contiv-netplugin
+      initContainers:
+        - name: contiv-netplugin-init
+          image: {{ contiv_init_image_repo }}:{{ contiv_init_image_tag }}
+          env:
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_MODE
+              value: kubernetes
+            - name: CONTIV_K8S_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_k8s_config
+            - name: CONTIV_CNI_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_cni_config
+          volumeMounts:
+            - mountPath: /var/contiv
+              name: var-contiv
+              readOnly: false
+            - mountPath: /etc/cni/net.d/
+              name: etc-cni-dir
+              readOnly: false
+        - name: contiv-cni
+          image: {{ contiv_image_repo }}:{{ contiv_version }}
+          command: ["cp", "/contiv/bin/contivk8s", "/opt/cni/bin/contivk8s"]
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+              readOnly: false
       containers:
         # Runs netplugin container on each Kubernetes node. This
         # container programs network policy and routes on each
         # host.
         - name: contiv-netplugin
           image: {{ contiv_image_repo }}:{{ contiv_image_tag }}
-          args:
-            - -pkubernetes
-            - -x
           env:
             - name: VLAN_IF
               value: {{ contiv_vlan_interface }}
-            - name: VTEP_IP
+            - name: CONTIV_NETPLUGIN_VLAN_UPLINKS
+              value: {{ contiv_vlan_interface }}
+            - name: CONTIV_NETPLUGIN_VXLAN_PORT
+              value: "{{ contiv_vxlan_port }}"
+            - name: CONTIV_ROLE
+              value: netplugin
+            - name: CONTIV_NETPLUGIN_MODE
+              value: kubernetes
+            - name: CONTIV_NETPLUGIN_VTEP_IP
               valueFrom:
                 fieldRef:
                   fieldPath: status.podIP
-            - name: CONTIV_ETCD
+            - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cluster_store
+                  key: contiv_etcd
-            - name: CONTIV_CNI_CONFIG
+            - name: CONTIV_NETPLUGIN_FORWARD_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: cni_config
+                  key: contiv_fwdmode
-            - name: CONTIV_CONFIG
+            - name: CONTIV_NETPLUGIN_NET_MODE
               valueFrom:
                 configMapKeyRef:
                   name: contiv-config
-                  key: config
+                  key: contiv_netmode
+            - name: CONTIV_NETPLUGIN_LOG_LEVEL
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_netplugin_loglevel
+          resources:
+            requests:
+              cpu: 250m
           securityContext:
             privileged: true
           volumeMounts:
-            - mountPath: /etc/openvswitch
-              name: etc-openvswitch
-              readOnly: false
-            - mountPath: /lib/modules
-              name: lib-modules
-              readOnly: false
             - mountPath: /var/run
               name: var-run
               readOnly: false
             - mountPath: /var/contiv
               name: var-contiv
               readOnly: false
-            - mountPath: /opt/cni/bin
-              name: cni-bin-dir
-              readOnly: false
-            - mountPath: /etc/cni/net.d/
-              name: etc-cni-dir
-              readOnly: false
       volumes:
         # Used by contiv-netplugin
-        - name: etc-openvswitch
-          hostPath:
-            path: /etc/openvswitch
-        - name: lib-modules
-          hostPath:
-            path: /lib/modules
         - name: var-run
           hostPath:
             path: /var/run
         - name: var-contiv
           hostPath:
             path: /var/contiv
-        # Used to install CNI.
         - name: cni-bin-dir
           hostPath:
             path: /opt/cni/bin

80 roles/network_plugin/contiv/templates/contiv-ovs.yml.j2 (Normal file)

@@ -0,0 +1,80 @@
+---
+apiVersion: apps/v1
+# This manifest deploys the contiv-ovs pod.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: contiv-ovs
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ovs
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-ovs
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ovs
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      hostNetwork: true
+      hostPID: true
+      tolerations:
+        - key: node-role.kubernetes.io/master
+          effect: NoSchedule
+      containers:
+        # Runs ovs containers on each Kubernetes node.
+        - name: contiv-ovsdb-server
+          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+          command: ["/scripts/start-ovsdb-server.sh"]
+          securityContext:
+            privileged: false
+          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+          env:
+            - name: OVSDBSERVER_EXTRA_FLAGS
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_ovsdb_server_extra_flags
+          volumeMounts:
+            - mountPath: /etc/openvswitch
+              name: etc-openvswitch
+              readOnly: false
+            - mountPath: /var/run
+              name: var-run
+              readOnly: false
+        - name: contiv-ovs-vswitchd
+          image: {{ contiv_ovs_image_repo }}:{{ contiv_ovs_image_tag }}
+          command: ["/scripts/start-ovs-vswitchd.sh"]
+          securityContext:
+            privileged: true
+          # Won't work until https://github.com/contiv/ovs-docker/pull/4 is merged and image is built again
+          env:
+            - name: OVSVSWITCHD_EXTRA_FLAGS
+              valueFrom:
+                configMapKeyRef:
+                  name: contiv-config
+                  key: contiv_ovs_vswitchd_extra_flags
+          volumeMounts:
+            - mountPath: /etc/openvswitch
+              name: etc-openvswitch
+              readOnly: false
+            - mountPath: /lib/modules
+              name: lib-modules
+              readOnly: true
+            - mountPath: /var/run
+              name: var-run
+              readOnly: false
+      volumes:
+        # Used by contiv-ovs
+        - name: etc-openvswitch
+          hostPath:
+            path: /etc/openvswitch
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run
+          hostPath:
+            path: /var/run

@@ -1,5 +1,12 @@
 ---
 
+- name: reset | include file with pre-reset tasks specific to the network_plugin if exists
+  include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/pre-reset.yml') | realpath }}"
+  when:
+    - kube_network_plugin in ['contiv']
+  tags:
+    - network
+
 - name: reset | stop services
   service:
     name: "{{ item }}"

@@ -150,6 +157,11 @@
     - "{{ bin_dir }}/weave"
     - /var/lib/rkt
     - /etc/vault
+    - /etc/contiv
+    - /var/contiv
+    - /run/contiv
+    - /etc/openvswitch
+    - /run/openvswitch
   ignore_errors: yes
   tags:
     - files

@@ -181,7 +193,7 @@
 - name: reset | include file with reset tasks specific to the network_plugin if exists
   include_tasks: "{{ (role_path + '/../network_plugin/' + kube_network_plugin + '/tasks/reset.yml') | realpath }}"
   when:
-    - kube_network_plugin in ['flannel', 'cilium']
+    - kube_network_plugin in ['flannel', 'cilium', 'contiv']
   tags:
     - network

@@ -100,9 +100,8 @@
     - { role: kubespray-defaults}
     - { role: upgrade/pre-upgrade, tags: pre-upgrade }
     - { role: kubernetes/node, tags: node }
-    - { role: upgrade/post-upgrade, tags: post-upgrade }
     - { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
-    - { role: kubespray-defaults}
+    - { role: upgrade/post-upgrade, tags: post-upgrade }
   environment: "{{proxy_env}}"
 
 - hosts: kube-master[0]