ansible-lint: add spaces around variables [E206] (#4699)
parent 560f50d3cd
commit e67f848abc

88 changed files with 363 additions and 353 deletions
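The change itself is mechanical: ansible-lint rule 206 ("Variables should have spaces before and after: {{ var_name }}") flags Jinja2 expressions whose braces hug the variable name. A minimal before/after sketch of the pattern applied in every hunk below (the task itself is illustrative, not taken from this commit):

    # Flagged by rule 206: braces hug the variable name
    - name: show a value
      debug:
        msg: "{{my_var}}"

    # Accepted: spaces inside the braces
    - name: show a value
      debug:
        msg: "{{ my_var }}"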
@@ -5,7 +5,6 @@ skip_list:
   # The following rules throw errors.
   # These either still need to be corrected in the repository and the rules re-enabled or they are skipped on purpose.
   - '204'
-  - '206'
   - '301'
   - '305'
   - '306'
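Deleting '206' from skip_list re-enables the var-spacing rule for the whole repository, which is why every remaining violation is fixed in this same commit. A single line can still opt out of one rule with a trailing "# noqa" comment, as some hunks below do for rule 601; a sketch with an illustrative task and command:

    - name: query pod readiness
      command: "{{ bin_dir }}/kubectl get pods -o jsonpath='{.items[?(@.ready==false)].metadata.name}'"  # noqa 601
      changed_when: false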
@@ -34,7 +34,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: true
      with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
      run_once: true

@@ -46,7 +46,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: "container-engine", tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

@@ -65,7 +65,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes/node, tags: node }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

@@ -109,7 +109,7 @@
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

@@ -8,4 +8,6 @@
     vm_list: "{{ vm_list_cmd.stdout }}"

 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"

@@ -13,4 +13,6 @@
     vm_roles_list: "{{ vm_list_cmd.stdout }}"

 - name: Generate inventory
-  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
+  template:
+    src: inventory.j2
+    dest: "{{ playbook_dir }}/inventory"
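The two inventory generators above (and the .generated tasks that follow) gain lines rather than just spacing, because inline key=value module arguments are expanded into block-style YAML in the same pass. The two forms are equivalent; block style lets each argument be quoted and linted on its own line:

    # Inline form, before this commit
    - name: Generate inventory
      template: src=inventory.j2 dest="{{ playbook_dir }}/inventory"

    # Equivalent block form, after
    - name: Generate inventory
      template:
        src: inventory.j2
        dest: "{{ playbook_dir }}/inventory"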
@@ -1,10 +1,15 @@
 ---
 - set_fact:
-    base_dir: "{{playbook_dir}}/.generated/"
+    base_dir: "{{ playbook_dir }}/.generated/"

-- file: path={{base_dir}} state=directory recurse=true
+- file:
+    path: "{{ base_dir }}"
+    state: directory
+    recurse: true

-- template: src={{item}} dest="{{base_dir}}/{{item}}"
+- template:
+    src: "{{ item }}"
+    dest: "{{ base_dir }}/{{ item }}"
   with_items:
     - network.json
     - storage.json

@@ -12,7 +12,7 @@
 - name: Null-ify some linux tools to ease DIND
   file:
     src: "/bin/true"
-    dest: "{{item}}"
+    dest: "{{ item }}"
     state: link
     force: yes
   with_items:

@@ -52,7 +52,7 @@
     - rsyslog
     - "{{ distro_ssh_service }}"

-- name: Create distro user "{{distro_user}}"
+- name: Create distro user "{{ distro_user }}"
   user:
     name: "{{ distro_user }}"
     uid: 1000

@@ -28,7 +28,7 @@
     - /lib/modules:/lib/modules
     - "{{ item }}:/dind/docker"
   register: containers
-  with_items: "{{groups.containers}}"
+  with_items: "{{ groups.containers }}"
   tags:
     - addresses

@@ -9,7 +9,7 @@
 - name: "Kubernetes Apps | Install and configure MetalLB"
   kube:
     name: "MetalLB"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/{{ item.item }}"
     state: "{{ item.changed | ternary('latest','present') }}"
   become: true

@@ -1,6 +1,8 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
+  template:
+    src: "{{ item.file }}"
+    dest: "{{ kube_config_dir }}/{{ item.dest }}"
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -12,9 +14,9 @@
   kube:
     name: glusterfs
     namespace: default
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.dest}}"
-    state: "{{item.changed | ternary('latest','present') }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.dest }}"
+    state: "{{ item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined

@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi Bootstrap"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-bootstrap.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Wait for heketi bootstrap to complete."

@@ -6,7 +6,7 @@
 - name: "Create heketi storage."
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage-bootstrap.json"
     state: "present"
   vars:

@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure GlusterFS daemonset"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/glusterfs-daemonset.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Kubernetes Apps | Label GlusterFS nodes"

@@ -33,6 +33,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Service Account"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-service-account.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"

@@ -6,7 +6,7 @@
 - name: "Kubernetes Apps | Install and configure Heketi"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-deployment.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"
 - name: "Ensure heketi is up and running."

@@ -7,7 +7,7 @@

 - name: "Kubernetes Apps | Test Heketi"
   register: "heketi_service_state"
-  command: "{{bin_dir}}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get service heketi-storage-endpoints -o=name --ignore-not-found=true"
   changed_when: false

 - name: "Kubernetes Apps | Bootstrap Heketi"
@@ -1,19 +1,19 @@
 ---
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Kubernetes Apps | Deploy cluster role binding."
   when: "clusterrolebinding_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
+  command: "{{ bin_dir }}/kubectl create clusterrolebinding heketi-gluster-admin --clusterrole=edit --serviceaccount=default:heketi-service-account"
 - register: "clusterrolebinding_state"
-  command: "{{bin_dir}}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get clusterrolebinding heketi-gluster-admin -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "clusterrolebinding_state.stdout != \"\""
     msg: "Cluster role binding is not present."

 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - name: "Render Heketi secret configuration."
   become: true

@@ -22,9 +22,9 @@
     dest: "{{ kube_config_dir }}/heketi.json"
 - name: "Deploy Heketi config secret"
   when: "secret_state.stdout == \"\""
-  command: "{{bin_dir}}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
+  command: "{{ bin_dir }}/kubectl create secret generic heketi-config-secret --from-file={{ kube_config_dir }}/heketi.json"
 - register: "secret_state"
-  command: "{{bin_dir}}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
+  command: "{{ bin_dir }}/kubectl get secret heketi-config-secret -o=name --ignore-not-found=true"
   changed_when: false
 - assert:
     that: "secret_state.stdout != \"\""

@@ -7,6 +7,6 @@
 - name: "Kubernetes Apps | Install and configure Heketi Storage"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/heketi-storage.json"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"

@@ -20,6 +20,6 @@
 - name: "Kubernetes Apps | Install and configure Storace Class"
   kube:
     name: "GlusterFS"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/storageclass.yml"
     state: "{{ rendering.changed | ternary('latest', 'present') }}"

@@ -11,7 +11,7 @@
   until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
   retries: 3
   delay: 2
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   run_once: true
   failed_when: false
   register: vault_etcd_health_check
mitogen.yaml (20 changed lines)
@@ -3,29 +3,29 @@
   strategy: linear
   vars:
     mitogen_version: master
-    mitogen_url: https://github.com/dw/mitogen/archive/{{mitogen_version}}.zip
+    mitogen_url: https://github.com/dw/mitogen/archive/{{ mitogen_version }}.zip
   tasks:
     - name: Create mitogen plugin dir
       file:
-        path: "{{item}}"
+        path: "{{ item }}"
         state: directory
       become: false
       loop:
-        - "{{playbook_dir}}/plugins/mitogen"
-        - "{{playbook_dir}}/dist"
+        - "{{ playbook_dir }}/plugins/mitogen"
+        - "{{ playbook_dir }}/dist"

     - name: download mitogen release
       get_url:
-        url: "{{mitogen_url}}"
-        dest: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
+        url: "{{ mitogen_url }}"
+        dest: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
         validate_certs: true

     - name: extract zip
       unarchive:
-        src: "{{playbook_dir}}/dist/mitogen_{{mitogen_version}}.zip"
-        dest: "{{playbook_dir}}/dist/"
+        src: "{{ playbook_dir }}/dist/mitogen_{{ mitogen_version }}.zip"
+        dest: "{{ playbook_dir }}/dist/"

     - name: copy plugin
       synchronize:
-        src: "{{playbook_dir}}/dist/mitogen-{{mitogen_version}}/"
-        dest: "{{playbook_dir}}/plugins/mitogen"
+        src: "{{ playbook_dir }}/dist/mitogen-{{ mitogen_version }}/"
+        dest: "{{ playbook_dir }}/plugins/mitogen"
@@ -1,15 +1,15 @@
 ---
 - name: User | Create User Group
   group:
-    name: "{{user.group|default(user.name)}}"
-    system: "{{user.system|default(omit)}}"
+    name: "{{ user.group|default(user.name) }}"
+    system: "{{ user.system|default(omit) }}"

 - name: User | Create User
   user:
-    comment: "{{user.comment|default(omit)}}"
-    createhome: "{{user.createhome|default(omit)}}"
-    group: "{{user.group|default(user.name)}}"
-    home: "{{user.home|default(omit)}}"
-    shell: "{{user.shell|default(omit)}}"
-    name: "{{user.name}}"
-    system: "{{user.system|default(omit)}}"
+    comment: "{{ user.comment|default(omit) }}"
+    createhome: "{{ user.createhome|default(omit) }}"
+    group: "{{ user.group|default(user.name) }}"
+    home: "{{ user.home|default(omit) }}"
+    shell: "{{ user.shell|default(omit) }}"
+    name: "{{ user.name }}"
+    system: "{{ user.system|default(omit) }}"

@@ -54,8 +54,8 @@
 - name: ensure docker-ce repository public key is installed
   action: "{{ docker_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{docker_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ docker_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded
@@ -67,7 +67,7 @@
 - name: ensure docker-ce repository is enabled
   action: "{{ docker_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ docker_repo_info.repos }}"
   when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS", "RedHat", "Suse", "ClearLinux"] or is_atomic) and (docker_repo_info.repos|length > 0)

@@ -75,8 +75,8 @@
 - name: ensure docker-engine repository public key is installed
   action: "{{ dockerproject_repo_key_info.pkg_key }}"
   args:
-    id: "{{item}}"
-    url: "{{dockerproject_repo_key_info.url}}"
+    id: "{{ item }}"
+    url: "{{ dockerproject_repo_key_info.url }}"
     state: present
   register: keyserver_task_result
   until: keyserver_task_result is succeeded

@@ -90,7 +90,7 @@
 - name: ensure docker-engine repository is enabled
   action: "{{ dockerproject_repo_info.pkg_repo }}"
   args:
-    repo: "{{item}}"
+    repo: "{{ item }}"
     state: present
   with_items: "{{ dockerproject_repo_info.repos }}"
   when:

@@ -123,7 +123,7 @@
     baseurl: "{{ extras_rh_repo_base_url }}"
     file: "extras"
     gpgcheck: yes
-    gpgkey: "{{extras_rh_repo_gpgkey}}"
+    gpgkey: "{{ extras_rh_repo_gpgkey }}"
     keepcache: "{{ docker_rpm_keepcache | default('1') }}"
     proxy: " {{ http_proxy | default('_none_') }}"
   when:
@@ -148,10 +148,10 @@
 - name: ensure docker packages are installed
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
-    pkg: "{{item.name}}"
-    force: "{{item.force|default(omit)}}"
-    conf_file: "{{item.yum_conf|default(omit)}}"
-    state: "{{item.state | default('present')}}"
+    pkg: "{{ item.name }}"
+    force: "{{ item.force|default(omit) }}"
+    conf_file: "{{ item.yum_conf|default(omit) }}"
+    state: "{{ item.state | default('present') }}"
     update_cache: "{{ omit if ansible_distribution == 'Fedora' else True }}"
   register: docker_task_result
   until: docker_task_result is succeeded

@@ -166,7 +166,7 @@
   action: "{{ docker_package_info.pkg_mgr }}"
   args:
     name: "{{ item.name }}"
-    state: "{{item.state | default('present')}}"
+    state: "{{ item.state | default('present') }}"
   with_items: "{{ docker_package_info.pkgs }}"
   register: docker_task_result
   until: docker_task_result is succeeded

@@ -185,7 +185,7 @@

 - name: show available packages on ubuntu
   fail:
-    msg: "{{available_packages}}"
+    msg: "{{ available_packages }}"
   when:
     - docker_task_result is failed
     - ansible_distribution == 'Ubuntu'
@@ -2,11 +2,11 @@

 - name: set dns server for docker
   set_fact:
-    docker_dns_servers: "{{dns_servers}}"
+    docker_dns_servers: "{{ dns_servers }}"

 - name: show docker_dns_servers
   debug:
-    msg: "{{docker_dns_servers}}"
+    msg: "{{ docker_dns_servers }}"

 - name: set base docker dns facts
   set_fact:

@@ -29,7 +29,7 @@ download_always_pull: False
 download_validate_certs: True

 # Use the first kube-master if download_localhost is not set
-download_delegate: "{% if download_localhost %}localhost{% else %}{{groups['kube-master'][0]}}{% endif %}"
+download_delegate: "{% if download_localhost %}localhost{% else %}{{ groups['kube-master'][0] }}{% endif %}"

 # Arch of Docker images and needed packages
 image_arch: "{{host_architecture | default('amd64')}}"
@@ -7,7 +7,7 @@

 - name: file_download | Create dest directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest | dirname }}"
     state: directory
     recurse: yes
   when:

@@ -20,9 +20,9 @@
 # to one task in the future.
 - name: file_download | Download item (delegate)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"

@@ -43,9 +43,9 @@

 - name: file_download | Download item (all)
   get_url:
-    url: "{{download.url}}"
-    dest: "{{download.dest}}"
-    sha256sum: "{{download.sha256 | default(omit)}}"
+    url: "{{ download.url }}"
+    dest: "{{ download.dest }}"
+    sha256sum: "{{ download.sha256|default(omit) }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     validate_certs: "{{ download_validate_certs }}"

@@ -64,8 +64,8 @@

 - name: file_download | Extract archives
   unarchive:
-    src: "{{download.dest}}"
-    dest: "{{download.dest|dirname}}"
+    src: "{{ download.dest }}"
+    dest: "{{ download.dest |dirname }}"
     owner: "{{ download.owner|default(omit) }}"
     mode: "{{ download.mode|default(omit) }}"
     copy: no
@@ -11,16 +11,16 @@

 - name: container_download | Create dest directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
     mode: 0755
-    owner: "{{ansible_ssh_user|default(ansible_user_id)}}"
+    owner: "{{ ansible_ssh_user|default(ansible_user_id) }}"
   when: download_container

 - name: container_download | create local directory for saved/loaded container images
   file:
-    path: "{{local_release_dir}}/containers"
+    path: "{{ local_release_dir }}/containers"
     state: directory
     recurse: yes
   delegate_to: localhost

@@ -5,7 +5,7 @@

 - set_fact:
     pull_args: >-
-      {%- if pull_by_digest %}{{download.repo}}@sha256:{{download.sha256}}{%- else -%}{{download.repo}}:{{download.tag}}{%- endif -%}
+      {%- if pull_by_digest %}{{ download.repo }}@sha256:{{ download.sha256 }}{%- else -%}{{ download.repo }}:{{ download.tag }}{%- endif -%}

 - name: Register docker images info
   shell: >-

@@ -33,7 +33,7 @@

 - name: Check the local digest sha256 corresponds to the given image tag
   assert:
-    that: "{{download.repo}}:{{download.tag}} in docker_images.stdout.split(',')"
+    that: "{{ download.repo }}:{{ download.tag }} in docker_images.stdout.split(',')"
   when:
     - not download_always_pull
     - not pull_required
@@ -8,7 +8,7 @@
     - facts

 - set_fact:
-    fname: "{{local_release_dir}}/containers/{{download.repo|regex_replace('/|\0|:', '_')}}:{{download.tag|default(download.sha256)|regex_replace('/|\0|:', '_')}}.tar"
+    fname: "{{ local_release_dir }}/containers/{{ download.repo|regex_replace('/|\0|:', '_') }}:{{ download.tag|default(download.sha256)|regex_replace('/|\0|:', '_') }}.tar"
   run_once: true
   when:
     - download.enabled

@@ -20,7 +20,7 @@

 - name: "container_download | Set default value for 'container_changed' to false"
   set_fact:
-    container_changed: "{{pull_required|default(false)}}"
+    container_changed: "{{ pull_required|default(false) }}"
   when:
     - download.enabled
     - download.container

@@ -1,7 +1,7 @@
 ---
 - name: file_download | create local download destination directory
   file:
-    path: "{{download.dest|dirname}}"
+    path: "{{ download.dest|dirname }}"
     state: directory
     recurse: yes
     mode: 0755
@@ -4,7 +4,7 @@
     paths: "{{ etcd_cert_dir }}"
     patterns: "ca.pem,node*.pem"
     get_checksum: true
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   register: etcdcert_master
   run_once: true

@@ -30,10 +30,10 @@
   with_items: "{{ expected_files }}"
   vars:
     expected_files: >-
-      ['{{etcd_cert_dir}}/ca.pem',
+      ['{{ etcd_cert_dir }}/ca.pem',
       {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
       {% for host in all_etcd_hosts %}
-      '{{etcd_cert_dir}}/node-{{ host }}-key.pem'
+      '{{ etcd_cert_dir }}/node-{{ host }}-key.pem'
       {% if not loop.last %}{{','}}{% endif %}
       {% endfor %}]

@@ -8,7 +8,7 @@
     mode: 0700
     recurse: yes

-- name: "Gen_certs | create etcd script dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd script dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_script_dir }}"
     state: directory
@@ -16,9 +16,9 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"

-- name: "Gen_certs | create etcd cert dir (on {{groups['etcd'][0]}})"
+- name: "Gen_certs | create etcd cert dir (on {{ groups['etcd'][0] }})"
   file:
     path: "{{ etcd_cert_dir }}"
     group: "{{ etcd_cert_group }}"

@@ -28,14 +28,14 @@
     mode: 0700
   run_once: yes
   when: inventory_hostname == groups['etcd'][0]
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"

 - name: Gen_certs | write openssl config
   template:
     src: "openssl.conf.j2"
     dest: "{{ etcd_config_dir }}/openssl.conf"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]

@@ -46,7 +46,7 @@
     dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
     mode: 0700
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
     - inventory_hostname == groups['etcd'][0]
@@ -65,7 +65,7 @@
     {% endif %}
     {% endfor %}"
   run_once: yes
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - gen_certs|default(false)
   notify: set etcd_secret_changed

@@ -87,7 +87,7 @@
     '{{ etcd_cert_dir }}/node-{{ node }}.pem',
     '{{ etcd_cert_dir }}/node-{{ node }}-key.pem',
     {% endfor %}]"
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when:
     - inventory_hostname in groups['etcd']
     - sync_certs|default(false)

@@ -133,13 +133,13 @@
   no_log: true
   register: etcd_node_certs
   check_mode: no
-  delegate_to: "{{groups['etcd'][0]}}"
+  delegate_to: "{{ groups['etcd'][0] }}"
   when: (('calico-rr' in groups and inventory_hostname in groups['calico-rr']) or
         inventory_hostname in groups['k8s-cluster']) and
         sync_certs|default(false) and inventory_hostname not in groups['etcd']

 - name: Gen_certs | Copy certs on nodes
-  shell: "base64 -d <<< '{{etcd_node_certs.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
+  shell: "base64 -d <<< '{{ etcd_node_certs.stdout|quote }}' | tar xz -C {{ etcd_cert_dir }}"
   args:
     executable: /bin/bash
   no_log: true
@@ -8,9 +8,9 @@
   set_fact:
     host_architecture: >-
       {%- if ansible_architecture in architecture_groups -%}
-      {{architecture_groups[ansible_architecture]}}
+      {{ architecture_groups[ansible_architecture] }}
       {%- else -%}
-      {{ansible_architecture}}
+      {{ ansible_architecture }}
       {% endif %}

 - include_tasks: check_certs.yml

@@ -13,7 +13,7 @@
     name: "netchecker-server"
     namespace: "{{ netcheck_namespace }}"
     filename: "{{ netchecker_server_manifest.stat.path }}"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "deploy"
     state: latest
   when: inventory_hostname == groups['kube-master'][0] and netchecker_server_manifest.stat.exists
@@ -39,13 +39,13 @@

 - name: Kubernetes Apps | Append extra templates to Netchecker Templates list for PodSecurityPolicy
   set_fact:
-    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates}}"
+    netchecker_templates: "{{ netchecker_templates_for_psp + netchecker_templates }}"
   when: podsecuritypolicy_enabled

 - name: Kubernetes Apps | Lay Down Netchecker Template
   template:
-    src: "{{item.file}}.j2"
-    dest: "{{kube_config_dir}}/{{item.file}}"
+    src: "{{ item.file }}.j2"
+    dest: "{{ kube_config_dir }}/{{ item.file }}"
   with_items: "{{ netchecker_templates }}"
   register: manifests
   when:

@@ -53,11 +53,11 @@

 - name: Kubernetes Apps | Start Netchecker Resources
   kube:
-    name: "{{item.item.name}}"
-    namespace: "{{netcheck_namespace}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    namespace: "{{ netcheck_namespace }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
@@ -41,10 +41,10 @@

 - name: Kubernetes Apps | Add policies, roles, bindings for PodSecurityPolicy
   kube:
-    name: "{{item.item.name}}"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    name: "{{ item.item.name }}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   register: result
   until: result is succeeded

@@ -69,7 +69,7 @@
 - name: Apply workaround to allow all nodes with cert O=system:nodes to register
   kube:
     name: "kubespray:system:node"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-crb.yml"
     state: latest

@@ -96,7 +96,7 @@
 - name: Apply webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrole"
     filename: "{{ kube_config_dir }}/node-webhook-cr.yml"
     state: latest

@@ -121,7 +121,7 @@
 - name: Grant system:nodes the webhook ClusterRole
   kube:
     name: "system:node-webhook"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/node-webhook-crb.yml"
     state: latest
@@ -164,7 +164,7 @@
 - name: Apply vsphere-cloud-provider ClusterRole
   kube:
     name: "system:vsphere-cloud-provider"
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "clusterrolebinding"
     filename: "{{ kube_config_dir }}/vsphere-rbac.yml"
     state: latest

@@ -194,7 +194,7 @@
 - name: PriorityClass | Create k8s-cluster-critical
   kube:
     name: k8s-cluster-critical
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     resource: "PriorityClass"
     filename: "{{ kube_config_dir }}/k8s-cluster-critical-pc.yml"
     state: latest

@@ -10,7 +10,7 @@

 - name: Apply OCI RBAC
   kube:
-    kubectl: "{{bin_dir}}/kubectl"
+    kubectl: "{{ bin_dir }}/kubectl"
     filename: "{{ kube_config_dir }}/oci-rbac.yml"
   when:
     - cloud_provider is defined
@@ -13,12 +13,12 @@

 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url Tesla
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_tesla_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_tesla_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "tesla"

 - name: Container Engine Acceleration Nvidia GPU | Set fact of download url GTX
   set_fact:
-    nvidia_driver_download_url_default: "{{nvidia_gpu_gtx_base_url}}{{nvidia_url_end}}"
+    nvidia_driver_download_url_default: "{{ nvidia_gpu_gtx_base_url }}{{ nvidia_url_end }}"
   when: nvidia_gpu_flavor|lower == "gtx"

 - name: Container Engine Acceleration Nvidia GPU | Create addon dir

@@ -49,6 +49,6 @@
     filename: "{{ kube_config_dir }}/addons/container_engine_accelerator/{{ item.item.file }}"
     state: "latest"
   with_items:
-    - "{{container_engine_accelerator_manifests.results}}"
+    - "{{ container_engine_accelerator_manifests.results }}"
   when:
     - inventory_hostname == groups['kube-master'][0] and nvidia_driver_install_container and nvidia_driver_install_supported

@@ -9,7 +9,7 @@
   delegate_to: "{{ item[0] }}"
   with_nested:
     - "{{ groups['k8s-cluster'] }}"
-    - "{{ local_volume_provisioner_storage_classes.keys() | list}}"
+    - "{{ local_volume_provisioner_storage_classes.keys() | list }}"

 - name: Local Volume Provisioner | Create addon dir
   file:
@@ -1,15 +1,15 @@
 ---
-- name: "Gen_helm_tiller_certs | Create helm config directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm config directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_config_dir }}"
     state: directory
     owner: kube

-- name: "Gen_helm_tiller_certs | Create helm script directory (on {{groups['kube-master'][0]}})"
+- name: "Gen_helm_tiller_certs | Create helm script directory (on {{ groups['kube-master'][0] }})"
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   file:
     path: "{{ helm_script_dir }}"
     state: directory

@@ -17,24 +17,24 @@

 - name: Gen_helm_tiller_certs | Copy certs generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   template:
     src: "helm-make-ssl.sh.j2"
     dest: "{{ helm_script_dir }}/helm-make-ssl.sh"
     mode: 0700

-- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{groups['kube-master'][0]}})"
+- name: "Check_helm_certs | check if helm client certs have already been generated on first master (on {{ groups['kube-master'][0] }})"
   find:
     paths: "{{ helm_home_dir }}"
     patterns: "*.pem"
     get_checksum: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: helmcert_master
   run_once: true

 - name: Gen_helm_tiller_certs | run cert generation script
   run_once: yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   command: "{{ helm_script_dir }}/helm-make-ssl.sh -e {{ helm_home_dir }} -d {{ helm_tiller_cert_dir }}"

 - set_fact:
|
||||||
no_log: true
|
no_log: true
|
||||||
register: helm_client_cert_data
|
register: helm_client_cert_data
|
||||||
check_mode: no
|
check_mode: no
|
||||||
delegate_to: "{{groups['kube-master'][0]}}"
|
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||||
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
||||||
|
|
||||||
- name: Gen_helm_tiller_certs | Use tempfile for unpacking certs on masters
|
- name: Gen_helm_tiller_certs | Use tempfile for unpacking certs on masters
|
||||||
|
@ -78,8 +78,8 @@
|
||||||
|
|
||||||
- name: Gen_helm_tiller_certs | Write helm client certs to tempfile
|
- name: Gen_helm_tiller_certs | Write helm client certs to tempfile
|
||||||
copy:
|
copy:
|
||||||
content: "{{helm_client_cert_data.stdout}}"
|
content: "{{ helm_client_cert_data.stdout }}"
|
||||||
dest: "{{helm_cert_tempfile.path}}"
|
dest: "{{ helm_cert_tempfile.path }}"
|
||||||
owner: root
|
owner: root
|
||||||
mode: "0600"
|
mode: "0600"
|
||||||
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
||||||
|
@ -93,7 +93,7 @@
|
||||||
|
|
||||||
- name: Gen_helm_tiller_certs | Cleanup tempfile on masters
|
- name: Gen_helm_tiller_certs | Cleanup tempfile on masters
|
||||||
file:
|
file:
|
||||||
path: "{{helm_cert_tempfile.path}}"
|
path: "{{ helm_cert_tempfile.path }}"
|
||||||
state: absent
|
state: absent
|
||||||
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
when: sync_helm_certs|default(false) and inventory_hostname != groups['kube-master'][0]
|
||||||
|
|
||||||
|
|
|
@ -7,8 +7,8 @@
|
||||||
|
|
||||||
- name: Helm | Lay Down Helm Manifests (RBAC)
|
- name: Helm | Lay Down Helm Manifests (RBAC)
|
||||||
template:
|
template:
|
||||||
src: "{{item.file}}.j2"
|
src: "{{ item.file }}.j2"
|
||||||
dest: "{{kube_config_dir}}/{{item.file}}"
|
dest: "{{ kube_config_dir }}/{{ item.file }}"
|
||||||
with_items:
|
with_items:
|
||||||
- {name: tiller, file: tiller-namespace.yml, type: namespace}
|
- {name: tiller, file: tiller-namespace.yml, type: namespace}
|
||||||
- {name: tiller, file: tiller-sa.yml, type: sa}
|
- {name: tiller, file: tiller-sa.yml, type: sa}
|
||||||
|
@ -20,11 +20,11 @@
|
||||||
|
|
||||||
- name: Helm | Apply Helm Manifests (RBAC)
|
- name: Helm | Apply Helm Manifests (RBAC)
|
||||||
kube:
|
kube:
|
||||||
name: "{{item.item.name}}"
|
name: "{{ item.item.name }}"
|
||||||
namespace: "{{ tiller_namespace }}"
|
namespace: "{{ tiller_namespace }}"
|
||||||
kubectl: "{{bin_dir}}/kubectl"
|
kubectl: "{{ bin_dir }}/kubectl"
|
||||||
resource: "{{item.item.type}}"
|
resource: "{{ item.item.type }}"
|
||||||
filename: "{{kube_config_dir}}/{{item.item.file}}"
|
filename: "{{ kube_config_dir }}/{{ item.item.file }}"
|
||||||
state: "latest"
|
state: "latest"
|
||||||
with_items: "{{ manifests.results }}"
|
with_items: "{{ manifests.results }}"
|
||||||
when:
|
when:
|
||||||
|
@ -56,7 +56,7 @@
|
||||||
{% endif %}
|
{% endif %}
|
||||||
register: install_helm
|
register: install_helm
|
||||||
changed_when: false
|
changed_when: false
|
||||||
environment: "{{proxy_env}}"
|
environment: "{{ proxy_env }}"
|
||||||
|
|
||||||
# FIXME: https://github.com/helm/helm/issues/4063
|
# FIXME: https://github.com/helm/helm/issues/4063
|
||||||
- name: Helm | Force apply tiller overrides if necessary
|
- name: Helm | Force apply tiller overrides if necessary
|
||||||
|
@ -73,12 +73,12 @@
|
||||||
{% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
|
{% if tiller_secure_release_info %} --override 'spec.template.spec.containers[0].command'='{/tiller,--storage=secret}' {% endif %}
|
||||||
{% if tiller_wait %} --wait{% endif %}
|
{% if tiller_wait %} --wait{% endif %}
|
||||||
--output yaml
|
--output yaml
|
||||||
| {{bin_dir}}/kubectl apply -f -
|
| {{ bin_dir }}/kubectl apply -f -
|
||||||
changed_when: false
|
changed_when: false
|
||||||
when:
|
when:
|
||||||
- (tiller_override is defined and tiller_override) or (kube_version is version('v1.11.1', '>='))
|
- (tiller_override is defined and tiller_override) or (kube_version is version('v1.11.1', '>='))
|
||||||
- inventory_hostname == groups['kube-master'][0]
|
- inventory_hostname == groups['kube-master'][0]
|
||||||
environment: "{{proxy_env}}"
|
environment: "{{ proxy_env }}"
|
||||||
|
|
||||||
- name: Make sure bash_completion.d folder exists
|
- name: Make sure bash_completion.d folder exists
|
||||||
file:
|
file:
|
||||||
|
|
|
@@ -1,11 +1,11 @@
 ---
 - name: Start Calico resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items:
     - "{{ calico_node_manifests.results }}"

@@ -1,11 +1,11 @@
 ---
 - name: Canal | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ canal_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped
@@ -1,17 +1,17 @@
 ---
 - name: Cilium | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ cilium_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped

 - name: Cilium | Wait for pods to run
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("cilium")==-1
   retries: 30

@@ -1,11 +1,11 @@
 ---
 - name: Flannel | Start Resources
   kube:
-    name: "{{item.item.name}}"
+    name: "{{ item.item.name }}"
     namespace: "kube-system"
-    kubectl: "{{bin_dir}}/kubectl"
-    resource: "{{item.item.type}}"
-    filename: "{{kube_config_dir}}/{{item.item.file}}"
+    kubectl: "{{ bin_dir }}/kubectl"
+    resource: "{{ item.item.type }}"
+    filename: "{{ kube_config_dir }}/{{ item.item.file }}"
     state: "latest"
   with_items: "{{ flannel_node_manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0] and not item is skipped

@@ -12,7 +12,7 @@
     - inventory_hostname == groups['kube-master'][0]

 - name: kube-router | Wait for kube-router pods to be ready
-  command: "{{bin_dir}}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
+  command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
   register: pods_not_ready
   until: pods_not_ready.stdout.find("kube-router")==-1
   retries: 30
@@ -1,11 +1,11 @@
 ---
 - name: Multus | Start resources
 kube:
-name: "{{item.item.name}}"
+name: "{{ item.item.name }}"
 namespace: "kube-system"
-kubectl: "{{bin_dir}}/kubectl"
-resource: "{{item.item.type}}"
-filename: "{{kube_config_dir}}/{{item.item.file}}"
+kubectl: "{{ bin_dir }}/kubectl"
+resource: "{{ item.item.type }}"
+filename: "{{ kube_config_dir }}/{{ item.item.file }}"
 state: "latest"
-with_items: "{{ multus_manifest_1.results }} + {{multus_manifest_2.results }}"
+with_items: "{{ multus_manifest_1.results }} + {{ multus_manifest_2.results }}"
 when: inventory_hostname == groups['kube-master'][0] and not item|skipped
@@ -2,7 +2,7 @@
 - name: Kubernetes Persistent Volumes | Lay down OpenStack Cinder Storage Class template
 template:
 src: "openstack-storage-class.yml.j2"
-dest: "{{kube_config_dir}}/openstack-storage-class.yml"
+dest: "{{ kube_config_dir }}/openstack-storage-class.yml"
 register: manifests
 when:
 - inventory_hostname == groups['kube-master'][0]

@@ -10,9 +10,9 @@
 - name: Kubernetes Persistent Volumes | Add OpenStack Cinder Storage Class
 kube:
 name: storage-class
-kubectl: "{{bin_dir}}/kubectl"
+kubectl: "{{ bin_dir }}/kubectl"
 resource: StorageClass
-filename: "{{kube_config_dir}}/openstack-storage-class.yml"
+filename: "{{ kube_config_dir }}/openstack-storage-class.yml"
 state: "latest"
 when:
 - inventory_hostname == groups['kube-master'][0]
@@ -10,8 +10,8 @@

 - name: Create calico-kube-controllers manifests
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: calico-kube-controllers, file: calico-kube-controllers.yml, type: deployment}
 - {name: calico-kube-controllers, file: calico-kube-sa.yml, type: sa}

@@ -24,11 +24,11 @@

 - name: Start of Calico kube controllers
 kube:
-name: "{{item.item.name}}"
+name: "{{ item.item.name }}"
 namespace: "kube-system"
-kubectl: "{{bin_dir}}/kubectl"
-resource: "{{item.item.type}}"
-filename: "{{kube_config_dir}}/{{item.item.file}}"
+kubectl: "{{ bin_dir }}/kubectl"
+resource: "{{ item.item.type }}"
+filename: "{{ kube_config_dir }}/{{ item.item.file }}"
 state: "latest"
 with_items:
 - "{{ calico_kube_manifests.results }}"
@@ -77,7 +77,7 @@
 - name: Join to cluster
 command: >-
 {{ bin_dir }}/kubeadm join
---config {{ kube_config_dir}}/kubeadm-client.conf
+--config {{ kube_config_dir }}/kubeadm-client.conf
 --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests
 register: kubeadm_join
 async: 120

@@ -88,7 +88,7 @@
 - name: Join to cluster with ignores
 command: >-
 {{ bin_dir }}/kubeadm join
---config {{ kube_config_dir}}/kubeadm-client.conf
+--config {{ kube_config_dir }}/kubeadm-client.conf
 --ignore-preflight-errors=all
 register: kubeadm_join
 async: 60
@@ -12,12 +12,12 @@

 - name: Base 64 Decode slurped secrets_encryption.yaml file
 set_fact:
-secret_file_decoded: "{{secret_file_encoded['content'] | b64decode | from_yaml}}"
+secret_file_decoded: "{{ secret_file_encoded['content'] | b64decode | from_yaml }}"
 when: secrets_encryption_file.stat.exists

 - name: Extract secret value from secrets_encryption.yaml
 set_fact:
-kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode}}"
+kube_encrypt_token_extracted: "{{ secret_file_decoded | json_query(secrets_encryption_query) | first | b64decode }}"
 when: secrets_encryption_file.stat.exists

 - name: Set kube_encrypt_token across master nodes
@@ -5,7 +5,7 @@
 {%- if "127.0.0.1" in kube_apiserver_endpoint or "localhost" in kube_apiserver_endpoint -%}
 {{ first_kube_master }}:{{ kube_apiserver_port }}
 {%- else -%}
-{{ kube_apiserver_endpoint | regex_replace('https://', '')}}
+{{ kube_apiserver_endpoint | regex_replace('https://', '') }}
 {%- endif %}
 tags:
 - facts

@@ -21,15 +21,15 @@

 - name: Wait for k8s apiserver
 wait_for:
-host: "{{kubeadm_discovery_address.split(':')[0]}}"
-port: "{{kubeadm_discovery_address.split(':')[1]}}"
+host: "{{ kubeadm_discovery_address.split(':')[0] }}"
+port: "{{ kubeadm_discovery_address.split(':')[1] }}"
 timeout: 180


 - name: Upload certificates so they are fresh and not expired
 command: >-
 {{ bin_dir }}/kubeadm init phase
---config {{ kube_config_dir}}/kubeadm-config.yaml
+--config {{ kube_config_dir }}/kubeadm-config.yaml
 upload-certs --experimental-upload-certs
 {% if kubeadm_certificate_key is defined %}
 --certificate-key={{ kubeadm_certificate_key }}

@@ -46,7 +46,7 @@
 - name: Joining control plane node to the cluster.
 command: >-
 {{ bin_dir }}/kubeadm join
---config {{ kube_config_dir}}/kubeadm-controlplane.yaml
+--config {{ kube_config_dir }}/kubeadm-controlplane.yaml
 --ignore-preflight-errors=all
 {% if kubeadm_certificate_key is defined %}
 --certificate-key={{ kubeadm_certificate_key }}
@@ -3,7 +3,7 @@
 stat:
 path: "{{ kube_cert_dir }}/apiserver.pem"
 register: old_apiserver_cert
-delegate_to: "{{groups['kube-master']|first}}"
+delegate_to: "{{ groups['kube-master'] | first }}"
 run_once: true

 - name: kubeadm | Migrate old certs if necessary

@@ -41,14 +41,14 @@

 - name: kubeadm | Delete old static pods
 file:
-path: "{{ kube_config_dir }}/manifests/{{item}}.manifest"
+path: "{{ kube_config_dir }}/manifests/{{ item }}.manifest"
 state: absent
 with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler", "kube-proxy"]
 when:
 - old_apiserver_cert.stat.exists

 - name: kubeadm | Forcefully delete old static pods
-shell: "docker ps -f name=k8s_{{item}} -q | xargs --no-run-if-empty docker rm -f"
+shell: "docker ps -f name=k8s_{{ item }} -q | xargs --no-run-if-empty docker rm -f"
 with_items: ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
 when:
 - old_apiserver_cert.stat.exists

@@ -147,7 +147,7 @@
 retries: 5
 delay: 5
 until: temp_token is succeeded
-delegate_to: "{{groups['kube-master']|first}}"
+delegate_to: "{{ groups['kube-master'] | first }}"
 when: kubeadm_token is not defined
 tags:
 - kubeadm_token

@@ -190,6 +190,6 @@
 # FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
 - name: kubeadm | Remove taint for master with node role
 command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} node-role.kubernetes.io/master:NoSchedule-"
-delegate_to: "{{groups['kube-master']|first}}"
+delegate_to: "{{ groups['kube-master'] | first }}"
 when: inventory_hostname in groups['kube-node']
 failed_when: false
@@ -1,7 +1,7 @@
 ---
 - name: "Pre-upgrade | Delete master manifests if etcd secrets changed"
 file:
-path: "/etc/kubernetes/manifests/{{item}}.manifest"
+path: "/etc/kubernetes/manifests/{{ item }}.manifest"
 state: absent
 with_items:
 - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]

@@ -9,7 +9,7 @@
 when: etcd_secret_changed|default(false)

 - name: "Pre-upgrade | Delete master containers forcefully"
-shell: "docker ps -af name=k8s_{{item}}* -q | xargs --no-run-if-empty docker rm -f"
+shell: "docker ps -af name=k8s_{{ item }}* -q | xargs --no-run-if-empty docker rm -f"
 with_items:
 - ["kube-apiserver", "kube-controller-manager", "kube-scheduler"]
 when: kube_apiserver_manifest_replaced.changed
@@ -56,7 +56,7 @@

 - name: check azure_loadbalancer_sku value
 fail:
-msg: "azure_loadbalancer_sku has an invalid value '{{azure_loadbalancer_sku}}'. Supported values are 'basic', 'standard'"
+msg: "azure_loadbalancer_sku has an invalid value '{{ azure_loadbalancer_sku }}'. Supported values are 'basic', 'standard'"
 when: azure_loadbalancer_sku not in ["basic", "standard"]

 - name: "check azure_exclude_master_from_standard_lb is a bool"
@@ -65,7 +65,7 @@
 - name: Verify if br_netfilter module exists
 shell: "modinfo br_netfilter"
 environment:
-PATH: "{{ ansible_env.PATH}}:/sbin" # Make sure we can workaround RH's conservative path management
+PATH: "{{ ansible_env.PATH }}:/sbin" # Make sure we can workaround RH's conservative path management
 register: modinfo_br_netfilter
 failed_when: modinfo_br_netfilter.rc not in [0, 1]
 changed_when: false

@@ -6,7 +6,7 @@ Wants=docker.socket

 [Service]
 User=root
-EnvironmentFile=-{{kube_config_dir}}/kubelet.env
+EnvironmentFile=-{{ kube_config_dir }}/kubelet.env
 ExecStartPre=-/bin/mkdir -p {{ kubelet_flexvolumes_plugins_dir }}
 ExecStart={{ bin_dir }}/kubelet \
 $KUBE_LOGTOSTDERR \
@@ -35,7 +35,7 @@
 - name: "Stop if known booleans are set as strings (Use JSON format on CLI: -e \"{'key': true }\")"
 assert:
 that: item.value|type_debug == 'bool'
-msg: "{{item.value}} isn't a bool"
+msg: "{{ item.value }} isn't a bool"
 run_once: yes
 with_items:
 - { name: download_run_once, value: "{{ download_run_once }}" }
@@ -8,9 +8,9 @@
 set_fact:
 host_architecture: >-
 {%- if ansible_architecture in architecture_groups -%}
-{{architecture_groups[ansible_architecture]}}
+{{ architecture_groups[ansible_architecture] }}
 {%- else -%}
-{{ansible_architecture}}
+{{ ansible_architecture }}
 {% endif %}

 - name: Force binaries directory for Container Linux by CoreOS

@@ -46,7 +46,7 @@
 - set_fact:
 bogus_domains: |-
 {% for d in [ 'default.svc.' + dns_domain, 'svc.' + dns_domain ] + searchdomains|default([]) -%}
-{{dns_domain}}.{{d}}./{{d}}.{{d}}./com.{{d}}./
+{{ dns_domain }}.{{ d }}./{{ d }}.{{ d }}./com.{{ d }}./
 {%- endfor %}
 cloud_resolver: >-
 {%- if cloud_provider is defined and cloud_provider == 'gce' -%}

@@ -139,9 +139,9 @@
 - name: generate nameservers to resolvconf
 set_fact:
 nameserverentries:
-nameserver {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ')}}
+nameserver {{ ( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(',nameserver ') }}
 supersede_nameserver:
-supersede domain-name-servers {{( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};
+supersede domain-name-servers {{ ( coredns_server + nameservers|d([]) + cloud_resolver|d([])) | join(', ') }};

 - name: gather os specific variables
 include_vars: "{{ item }}"
@@ -17,7 +17,7 @@
 - master
 - node
 with_items:
-- "{{bin_dir}}"
+- "{{ bin_dir }}"
 - "{{ kube_config_dir }}"
 - "{{ kube_cert_dir }}"
 - "{{ kube_manifest_dir }}"
@@ -5,7 +5,7 @@

 - name: Add domain/search/nameservers/options to resolv.conf
 blockinfile:
-path: "{{resolvconffile}}"
+path: "{{ resolvconffile }}"
 block: |-
 {% for item in [domainentry] + [searchentries] + nameserverentries.split(',') -%}
 {{ item }}

@@ -22,7 +22,7 @@

 - name: Remove search/domain/nameserver options before block
 replace:
-dest: "{{item[0]}}"
+dest: "{{ item[0] }}"
 regexp: '^{{ item[1] }}[^#]*(?=# Ansible entries BEGIN)'
 backup: yes
 follow: yes

@@ -33,7 +33,7 @@

 - name: Remove search/domain/nameserver options after block
 replace:
-dest: "{{item[0]}}"
+dest: "{{ item[0] }}"
 regexp: '(# Ansible entries END\n(?:(?!^{{ item[1] }}).*\n)*)(?:^{{ item[1] }}.*\n?)+'
 replace: '\1'
 backup: yes

@@ -51,7 +51,7 @@

 - name: persist resolvconf cloud init file
 template:
-dest: "{{resolveconf_cloud_init_conf}}"
+dest: "{{ resolveconf_cloud_init_conf }}"
 src: resolvconf.j2
 owner: root
 mode: 0644
@@ -31,14 +31,14 @@

 - name: Stat sysctl file configuration
 stat:
-path: "{{sysctl_file_path}}"
+path: "{{ sysctl_file_path }}"
 register: sysctl_file_stat
 tags:
 - bootstrap-os

 - name: Change sysctl file path to link source if linked
 set_fact:
-sysctl_file_path: "{{sysctl_file_stat.stat.lnk_source}}"
+sysctl_file_path: "{{ sysctl_file_stat.stat.lnk_source }}"
 when:
 - sysctl_file_stat.stat.islnk is defined
 - sysctl_file_stat.stat.islnk

@@ -52,7 +52,7 @@

 - name: Enable ip forwarding
 sysctl:
-sysctl_file: "{{sysctl_file_path}}"
+sysctl_file: "{{ sysctl_file_path }}"
 name: net.ipv4.ip_forward
 value: 1
 state: present
@@ -5,7 +5,7 @@
 block: |-
 {% for item in (groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]))|unique -%}
 {% if 'access_ip' in hostvars[item] or 'ip' in hostvars[item] or fallback_ips[item] != "skip" -%}
-{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item]))}}
+{{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(fallback_ips[item])) }}
 {%- if ('ansible_hostname' in hostvars[item] and item != hostvars[item]['ansible_hostname']) %} {{ hostvars[item]['ansible_hostname'] }}.{{ dns_domain }} {{ hostvars[item]['ansible_hostname'] }}{% endif %} {{ item }} {{ item }}.{{ dns_domain }}
 {% endif %}
 {% endfor %}

@@ -5,7 +5,7 @@
 {% for item in [ supersede_domain, supersede_search, supersede_nameserver ] -%}
 {{ item }}
 {% endfor %}
-path: "{{dhclientconffile}}"
+path: "{{ dhclientconffile }}"
 create: yes
 state: present
 insertbefore: BOF
@@ -5,7 +5,7 @@

 - name: Remove kubespray specific config from dhclient config
 blockinfile:
-path: "{{dhclientconffile}}"
+path: "{{ dhclientconffile }}"
 state: absent
 backup: yes
 marker: "# Ansible entries {mark}"

@@ -2,7 +2,7 @@
 - name: "Check_tokens | check if the tokens have already been generated on first master"
 stat:
 path: "{{ kube_token_dir }}/known_tokens.csv"
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 register: known_tokens_master
 run_once: true
@@ -5,7 +5,7 @@
 dest: "{{ kube_script_dir }}/kube-gen-token.sh"
 mode: 0700
 run_once: yes
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: gen_tokens|default(false)

 - name: Gen_tokens | generate tokens for master components

@@ -18,7 +18,7 @@
 register: gentoken_master
 changed_when: "'Added' in gentoken_master.stdout"
 run_once: yes
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: gen_tokens|default(false)

 - name: Gen_tokens | generate tokens for node components

@@ -31,14 +31,14 @@
 register: gentoken_node
 changed_when: "'Added' in gentoken_node.stdout"
 run_once: yes
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: gen_tokens|default(false)

 - name: Gen_tokens | Get list of tokens from first master
 shell: "(find {{ kube_token_dir }} -maxdepth 1 -type f)"
 register: tokens_list
 check_mode: no
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 run_once: true
 when: sync_tokens|default(false)

@@ -48,7 +48,7 @@
 warn: false
 register: tokens_data
 check_mode: no
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 run_once: true
 when: sync_tokens|default(false)
@@ -376,7 +376,7 @@ contiv_global_neighbor_as: "500"
 fallback_ips_base: |
 ---
 {% for item in groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([])|unique %}
-{{item}}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
+{{ item }}: "{{ hostvars[item].get('ansible_default_ipv4', {'address': '127.0.0.1'})['address'] }}"
 {% endfor %}
 fallback_ips: "{{ fallback_ips_base | from_yaml }}"
@@ -61,7 +61,7 @@
 ETCDCTL_KEY: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
-delegate_to: "{{groups['etcd'][0]}}"
+delegate_to: "{{ groups['etcd'][0] }}"
 when:
 - calico_version is version("v3.0.0", ">=")

@@ -79,7 +79,7 @@
 ETCDCTL_KEY_FILE: "{{ etcd_cert_dir }}/admin-{{ groups['etcd'][0] }}-key.pem"
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
-delegate_to: "{{groups['etcd'][0]}}"
+delegate_to: "{{ groups['etcd'][0] }}"
 when:
 - calico_version is version("v3.0.0", "<")

@@ -155,7 +155,7 @@
 - calico_version is version('v3.0.0', '>=')

 - name: Calico | Set global as_num (legacy)
-command: "{{ bin_dir}}/calicoctl.sh config set asNumber {{ global_as_num }}"
+command: "{{ bin_dir }}/calicoctl.sh config set asNumber {{ global_as_num }}"
 run_once: true
 when:
 - calico_version is version('v3.0.0', '<')
@@ -301,7 +301,7 @@
 "name": "{{ inventory_hostname }}-{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
 },
 "spec": {
-"asNumber": "{{ local_as | default(global_as_num)}}",
+"asNumber": "{{ local_as | default(global_as_num) }}",
 "node": "{{ inventory_hostname }}",
 "peerIP": "{{ hostvars[item]["calico_rr_ip"]|default(hostvars[item]["ip"])|default(fallback_ips[item]) }}"
 }}' | {{ bin_dir }}/calicoctl.sh create --skip-exists -f -

@@ -319,7 +319,7 @@
 shell: >
 echo '{
 "kind": "bgpPeer",
-"spec": {"asNumber": "{{ local_as | default(global_as_num)}}"},
+"spec": {"asNumber": "{{ local_as | default(global_as_num) }}"},
 "apiVersion": "v1",
 "metadata": {"node": "{{ inventory_hostname }}",
 "scope": "node",
@@ -338,8 +338,8 @@

 - name: Calico | Create calico manifests
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: calico-config, file: calico-config.yml, type: cm}
 - {name: calico-node, file: calico-node.yml, type: ds}

@@ -353,8 +353,8 @@

 - name: Calico | Create calico manifests for kdd
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: calico, file: kdd-crds.yml, type: kdd}
 register: calico_node_kdd_manifest

@@ -364,8 +364,8 @@

 - name: Calico | Create calico manifests for typha
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: calico, file: calico-typha.yml, type: typha}
 register: calico_node_typha_manifest
@@ -7,7 +7,7 @@
 owner: root
 group: root
 force: yes
-environment: "{{proxy_env}}"
+environment: "{{ proxy_env }}"
 - name: "Create etcdv2 and etcdv3 calicoApiConfig"
 template:
 src: "{{ item }}-store.yml.j2"

@@ -31,7 +31,7 @@
 '{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
 retries: 4
 delay: "{{ retry_stagger | random + 3 }}"
-delegate_to: "{{groups['etcd'][0]}}"
+delegate_to: "{{ groups['etcd'][0] }}"
 changed_when: false
 run_once: true
 environment:

@@ -40,8 +40,8 @@

 - name: Canal | Create canal node manifests
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: canal-config, file: canal-config.yaml, type: cm}
 - {name: canal-node, file: canal-node.yaml, type: ds}
@@ -27,8 +27,8 @@

 - name: Cilium | Create Cilium node manifests
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: cilium, file: cilium-config.yml, type: cm}
 - {name: cilium, file: cilium-crb.yml, type: clusterrolebinding}

@@ -1,8 +1,8 @@
 ---
 - name: Flannel | Create Flannel manifests
 template:
-src: "{{item.file}}.j2"
-dest: "{{kube_config_dir}}/{{item.file}}"
+src: "{{ item.file }}.j2"
+dest: "{{ kube_config_dir }}/{{ item.file }}"
 with_items:
 - {name: flannel, file: cni-flannel-rbac.yml, type: sa}
 - {name: kube-flannel, file: cni-flannel.yml, type: ds}
@@ -1,21 +1,21 @@
 ---
 - name: kube-router | Add annotations on kube-master
-command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
 with_items:
 - "{{ kube_router_annotations_master }}"
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: kube_router_annotations_master is defined and inventory_hostname in groups['kube-master']

 - name: kube-router | Add annotations on kube-node
-command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
 with_items:
 - "{{ kube_router_annotations_node }}"
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: kube_router_annotations_node is defined and inventory_hostname in groups['kube-node']

 - name: kube-router | Add common annotations on all servers
-command: "{{bin_dir}}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
+command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
 with_items:
 - "{{ kube_router_annotations_all }}"
-delegate_to: "{{groups['kube-master'][0]}}"
+delegate_to: "{{ groups['kube-master'][0] }}"
 when: kube_router_annotations_all is defined and inventory_hostname in groups['all']
@@ -32,7 +32,7 @@
 - old_etcd_members is defined

 - name: Remove old cluster members
-shell: "{{ bin_dir}}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
+shell: "{{ bin_dir }}/etcdctl --endpoints={{ etcd_access_addresses }} member remove {{ item[1].replace(' ','').split(',')[0] }}"
 environment:
 - ETCDCTL_API: 3
 - ETCDCTL_CA_FILE: /etc/ssl/etcd/ssl/ca.pem

@@ -1,7 +1,7 @@
 ---

 - name: Delete node
-command: "{{ bin_dir}}/kubectl delete node {{ item }}"
+command: "{{ bin_dir }}/kubectl delete node {{ item }}"
 with_items:
 - "{{ node.split(',') | default(groups['kube-node']) }}"
 delegate_to: "{{ groups['kube-master']|first }}"
@@ -118,7 +118,7 @@
 - mounts

 - name: reset | unmount kubelet dirs
-command: umount -f {{item}}
+command: umount -f {{ item }}
 with_items: '{{ mounted_dirs.stdout_lines }}'
 register: umount_dir
 retries: 4

@@ -170,7 +170,7 @@
 path: "{{ item }}"
 state: absent
 with_items:
-- "{{kube_config_dir}}"
+- "{{ kube_config_dir }}"
 - /var/lib/kubelet
 - /root/.kube
 - /root/.helm
@@ -16,11 +16,11 @@

 # Due to https://github.com/kubernetes/kubernetes/issues/58212 we cannot rely on exit code for "kubectl patch"
 - name: Check current nodeselector for kube-proxy daemonset
-shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
+shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get ds kube-proxy --namespace=kube-system -o jsonpath='{.spec.template.spec.nodeSelector.beta.kubernetes.io/os}'"
 register: current_kube_proxy_state

 - name: Apply nodeselector patch for kube-proxy daemonset
-shell: "{{bin_dir}}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
+shell: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf patch ds kube-proxy --namespace=kube-system --type=strategic -p \"$(cat nodeselector-os-linux-patch.json)\""
 args:
 chdir: "{{ kubernetes_user_manifests_path }}"
 register: patch_kube_proxy_state

@@ -53,4 +53,4 @@
 - { role: kubernetes/node, tags: node }
 - { role: kubernetes/kubeadm, tags: kubeadm }
 - { role: network_plugin, tags: network }
-environment: "{{proxy_env}}"
+environment: "{{ proxy_env }}"
@@ -32,13 +32,13 @@
 - name: etcd_info
 cmd: "{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses | default('http://127.0.0.1:2379') }} cluster-health"
 - name: calico_info
-cmd: "{{bin_dir}}/calicoctl node status"
+cmd: "{{ bin_dir }}/calicoctl node status"
 when: '{{ kube_network_plugin == "calico" }}'
 - name: calico_workload_info
-cmd: "{{bin_dir}}/calicoctl get workloadEndpoint -o wide"
+cmd: "{{ bin_dir }}/calicoctl get workloadEndpoint -o wide"
 when: '{{ kube_network_plugin == "calico" }}'
 - name: calico_pool_info
-cmd: "{{bin_dir}}/calicoctl get ippool -o wide"
+cmd: "{{ bin_dir }}/calicoctl get ippool -o wide"
 when: '{{ kube_network_plugin == "calico" }}'
 - name: weave_info
 cmd: weave report

@@ -111,19 +111,19 @@
 - name: Storing commands output
 shell: "{{ item.cmd }} 2>&1 | tee {{ item.name }}"
 failed_when: false
-with_items: "{{commands}}"
+with_items: "{{ commands }}"
 when: item.when | default(True)
 no_log: True

 - name: Fetch results
 fetch: src={{ item.name }} dest=/tmp/{{ archive_dirname }}/commands
-with_items: "{{commands}}"
+with_items: "{{ commands }}"
 when: item.when | default(True)
 failed_when: false

 - name: Fetch logs
 fetch: src={{ item }} dest=/tmp/{{ archive_dirname }}/logs
-with_items: "{{logs}}"
+with_items: "{{ logs }}"
 failed_when: false

 - name: Pack results and logs

@@ -137,4 +137,4 @@

 - name: Clean up collected command outputs
 file: path={{ item.name }} state=absent
-with_items: "{{commands}}"
+with_items: "{{ commands }}"
@@ -40,7 +40,7 @@
 dest: "{{ images_dir }}/Dockerfile"

 - name: Create docker images for each OS
-command: docker build -t {{registry}}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
+command: docker build -t {{ registry }}/vm-{{ item.key }} --build-arg cloud_image="{{ item.key }}.qcow2" {{ images_dir }}
 with_dict:
 - "{{ images }}"
@@ -10,8 +10,8 @@
 aws_access_key: "{{ aws.access_key }}"
 aws_secret_key: "{{ aws.secret_key }}"
 region: "{{ aws.region }}"
-group_id: "{{ aws.group}}"
-instance_type: "{{ aws.instance_type}}"
+group_id: "{{ aws.group }}"
+instance_type: "{{ aws.instance_type }}"
 image: "{{ aws.ami_id }}"
 wait: true
 count: "{{ aws.count }}"

@@ -30,4 +30,4 @@
 timeout: 300
 state: started
 delegate_to: localhost
-with_items: "{{ec2.instances}}"
+with_items: "{{ ec2.instances }}"
@@ -52,20 +52,20 @@
 tasks:
 - name: replace_test_id
 set_fact:
-test_name: "{{test_id |regex_replace('\\.', '-')}}"
+test_name: "{{ test_id |regex_replace('\\.', '-') }}"

 - name: show vars
-debug: msg="{{cloud_region}}, {{cloud_image}}"
+debug: msg="{{ cloud_region }}, {{ cloud_image }}"

 - set_fact:
 instance_names: >-
 {%- if mode in ['separate', 'ha'] -%}
-["k8s-{{test_name}}-1", "k8s-{{test_name}}-2", "k8s-{{test_name}}-3"]
+["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2", "k8s-{{ test_name }}-3"]
 {%- else -%}
-["k8s-{{test_name}}-1", "k8s-{{test_name}}-2"]
+["k8s-{{ test_name }}-1", "k8s-{{ test_name }}-2"]
 {%- endif -%}

-- name: Manage DO instances | {{state}}
+- name: Manage DO instances | {{ state }}
 digital_ocean:
 unique_name: yes
 api_token: "{{ lookup('env','DO_API_TOKEN') }}"

@@ -73,16 +73,16 @@
 image_id: "{{ cloud_image }}"
 name: "{{ item }}"
 private_networking: no
-region_id: "{{cloud_region}}"
-size_id: "{{cloud_machine_type}}"
-ssh_key_ids: "{{ssh_key_id}}"
-state: "{{state}}"
+region_id: "{{ cloud_region }}"
+size_id: "{{ cloud_machine_type }}"
+ssh_key_ids: "{{ ssh_key_id }}"
+state: "{{ state }}"
 wait: yes
 register: droplets
-with_items: "{{instance_names}}"
+with_items: "{{ instance_names }}"

 - debug:
-msg: "{{droplets}}, {{inventory_path}}"
+msg: "{{ droplets }}, {{ inventory_path }}"
 when: state == 'present'

 - name: Template the inventory

@@ -92,6 +92,6 @@
 when: state == 'present'

 - name: Wait for SSH to come up
-wait_for: host={{item.droplet.ip_address}} port=22 delay=10 timeout=180 state=started
-with_items: "{{droplets.results}}"
+wait_for: host={{ item.droplet.ip_address }} port=22 delay=10 timeout=180 state=started
+with_items: "{{ droplets.results }}"
 when: state == 'present'
@@ -14,39 +14,39 @@

 - name: replace_test_id
 set_fact:
-test_name: "{{test_id |regex_replace('\\.', '-')}}"
+test_name: "{{ test_id |regex_replace('\\.', '-') }}"

 - set_fact:
 instance_names: >-
 {%- if mode in ['separate', 'separate-scale', 'ha', 'ha-scale'] -%}
-k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
 {%- elif mode == 'aio' -%}
-k8s-{{test_name}}-1
+k8s-{{ test_name }}-1
 {%- else -%}
-k8s-{{test_name}}-1,k8s-{{test_name}}-2
+k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
 {%- endif -%}

 - name: Create gce instances
 gce:
-instance_names: "{{instance_names}}"
+instance_names: "{{ instance_names }}"
 machine_type: "{{ cloud_machine_type }}"
 image: "{{ cloud_image | default(omit) }}"
 image_family: "{{ cloud_image_family | default(omit) }}"
 preemptible: "{{ preemptible }}"
 service_account_email: "{{ gce_service_account_email }}"
-pem_file: "{{ gce_pem_file | default(omit)}}"
-credentials_file: "{{gce_credentials_file | default(omit)}}"
+pem_file: "{{ gce_pem_file | default(omit) }}"
+credentials_file: "{{ gce_credentials_file | default(omit) }}"
 project_id: "{{ gce_project_id }}"
-zone: "{{cloud_region}}"
-metadata: '{"test_id": "{{test_id}}", "network": "{{kube_network_plugin}}", "startup-script": "{{startup_script|default("")}}"}'
-tags: "build-{{test_name}},{{kube_network_plugin}}"
+zone: "{{ cloud_region }}"
+metadata: '{"test_id": "{{ test_id }}", "network": "{{ kube_network_plugin }}", "startup-script": "{{ startup_script|default("") }}"}'
+tags: "build-{{ test_name }},{{ kube_network_plugin }}"
 ip_forward: yes
 service_account_permissions: ['compute-rw']
 register: gce

 - name: Add instances to host group
-add_host: hostname={{item.public_ip}} groupname="waitfor_hosts"
-with_items: '{{gce.instance_data}}'
+add_host: hostname={{ item.public_ip }} groupname="waitfor_hosts"
+with_items: '{{ gce.instance_data }}'

 - name: Template the inventory
 template:
@@ -8,25 +8,25 @@
 tasks:
 - name: replace_test_id
 set_fact:
-test_name: "{{test_id |regex_replace('\\.', '-')}}"
+test_name: "{{ test_id |regex_replace('\\.', '-') }}"

 - set_fact:
 instance_names: >-
 {%- if mode in ['separate', 'ha'] -%}
-k8s-{{test_name}}-1,k8s-{{test_name}}-2,k8s-{{test_name}}-3
+k8s-{{ test_name }}-1,k8s-{{ test_name }}-2,k8s-{{ test_name }}-3
 {%- else -%}
-k8s-{{test_name}}-1,k8s-{{test_name}}-2
+k8s-{{ test_name }}-1,k8s-{{ test_name }}-2
 {%- endif -%}

 - name: stop gce instances
 gce:
-instance_names: "{{instance_names}}"
+instance_names: "{{ instance_names }}"
 image: "{{ cloud_image | default(omit) }}"
 service_account_email: "{{ gce_service_account_email }}"
-pem_file: "{{ gce_pem_file | default(omit)}}"
-credentials_file: "{{gce_credentials_file | default(omit)}}"
+pem_file: "{{ gce_pem_file | default(omit) }}"
+credentials_file: "{{ gce_credentials_file | default(omit) }}"
 project_id: "{{ gce_project_id }}"
-zone: "{{cloud_region | default('europe-west1-b')}}"
+zone: "{{ cloud_region | default('europe-west1-b') }}"
 state: 'stopped'
 async: 120
 poll: 3

@@ -35,13 +13,13 @@
@ -16,7 +16,7 @@
|
||||||
test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
|
test_name: "kargo-ci-{{ out.stdout_lines[0] }}"
|
||||||
|
|
||||||
- set_fact:
|
- set_fact:
|
||||||
file_name: "{{ostype}}-{{kube_network_plugin}}-{{commit}}-logs.tar.gz"
|
file_name: "{{ ostype }}-{{ kube_network_plugin }}-{{ commit }}-logs.tar.gz"
|
||||||
|
|
||||||
- name: Create a bucket
|
- name: Create a bucket
|
||||||
gc_storage:
|
gc_storage:
|
||||||
|
@@ -30,31 +30,31 @@
 - name: Create a lifecycle template for the bucket
   template:
     src: gcs_life.json.j2
-    dest: "{{dir}}/gcs_life.json"
+    dest: "{{ dir }}/gcs_life.json"

 - name: Create a boto config to access GCS
   template:
     src: boto.j2
-    dest: "{{dir}}/.boto"
+    dest: "{{ dir }}/.boto"
   no_log: True

 - name: Download gsutil cp installer
   get_url:
     url: https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash
-    dest: "{{dir}}/gcp-installer.sh"
+    dest: "{{ dir }}/gcp-installer.sh"

 - name: Get gsutil tool
-  script: "{{dir}}/gcp-installer.sh"
+  script: "{{ dir }}/gcp-installer.sh"
   environment:
     CLOUDSDK_CORE_DISABLE_PROMPTS: 1
-    CLOUDSDK_INSTALL_DIR: "{{dir}}"
+    CLOUDSDK_INSTALL_DIR: "{{ dir }}"
   no_log: True
   failed_when: false

 - name: Apply the lifecycle rules
-  command: "{{dir}}/google-cloud-sdk/bin/gsutil lifecycle set {{dir}}/gcs_life.json gs://{{test_name}}"
+  command: "{{ dir }}/google-cloud-sdk/bin/gsutil lifecycle set {{ dir }}/gcs_life.json gs://{{ test_name }}"
   environment:
-    BOTO_CONFIG: "{{dir}}/.boto"
+    BOTO_CONFIG: "{{ dir }}/.boto"
   no_log: True

 - name: Upload collected diagnostic info
@@ -63,13 +63,13 @@
     mode: put
     permission: public-read
     object: "{{ file_name }}"
-    src: "{{dir}}/logs.tar.gz"
+    src: "{{ dir }}/logs.tar.gz"
     headers: '{"Content-Encoding": "x-gzip"}'
     gs_access_key: "{{ gs_key }}"
     gs_secret_key: "{{ gs_skey }}"
-    expiration: "{{expire_days * 36000|int}}"
+    expiration: "{{ expire_days * 36000|int }}"
   failed_when: false
   no_log: True

 - debug:
-    msg: "A public url https://storage.googleapis.com/{{test_name}}/{{file_name}}"
+    msg: "A public url https://storage.googleapis.com/{{ test_name }}/{{ file_name }}"
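
One nuance in the expiration line above: Jinja2 filters bind more tightly than arithmetic, so "expire_days * 36000|int" multiplies by (36000|int) rather than casting the product. The E206 change only adds spaces and does not alter that evaluation. If the intent were to cast the whole product, the grouping would need to be explicit, roughly (a hypothetical variant, not what this commit does):

    expiration: "{{ (expire_days * 36000) | int }}"
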
@@ -12,14 +12,14 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

 - name: Check kubectl output
-  shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+  shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
   register: get_pods
   no_log: true

-- debug: msg="{{get_pods.stdout.split('\n')}}"
+- debug: msg="{{ get_pods.stdout.split('\n') }}"

 - name: Check that all pods are running and ready
-  shell: "{{bin_dir}}/kubectl get pods --all-namespaces --no-headers -o yaml"
+  shell: "{{ bin_dir }}/kubectl get pods --all-namespaces --no-headers -o yaml"
   register: run_pods_log
   until:
   # Check that all pods are running
@@ -32,9 +32,9 @@
   no_log: true

 - name: Check kubectl output
-  shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+  shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
   register: get_pods
   no_log: true

-- debug: msg="{{get_pods.stdout.split('\n')}}"
+- debug: msg="{{ get_pods.stdout.split('\n') }}"
   failed_when: not run_pods_log is success
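
The readiness checks in this file all rely on the same register/until retry loop; the full until: expression falls outside the hunks shown here. A generic sketch of the construct, with invented names:

    - name: wait until a command reports the state we need
      shell: /usr/local/bin/check-state   # hypothetical check
      register: state_check
      until: state_check.stdout.find('ready') != -1   # re-evaluated after each attempt
      retries: 10   # maximum attempts
      delay: 5      # seconds between attempts
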
@@ -15,13 +15,13 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

 - name: Create test namespace
-  shell: "{{bin_dir}}/kubectl create namespace test"
+  shell: "{{ bin_dir }}/kubectl create namespace test"

 - name: Run a replica controller composed of 2 pods in test ns
-  shell: "{{bin_dir}}/kubectl run test --image={{test_image_repo}}:{{test_image_tag}} --namespace test --replicas=2 --command -- tail -f /dev/null"
+  shell: "{{ bin_dir }}/kubectl run test --image={{ test_image_repo }}:{{ test_image_tag }} --namespace test --replicas=2 --command -- tail -f /dev/null"

 - name: Check that all pods are running and ready
-  shell: "{{bin_dir}}/kubectl get pods --namespace test --no-headers -o yaml"
+  shell: "{{ bin_dir }}/kubectl get pods --namespace test --no-headers -o yaml"
   register: run_pods_log
   until:
   # Check that all pods are running
@@ -34,31 +34,31 @@
   no_log: true

 - name: Get pod names
-  shell: "{{bin_dir}}/kubectl get pods -n test -o json"
+  shell: "{{ bin_dir }}/kubectl get pods -n test -o json"
   register: pods
   no_log: true

-- debug: msg="{{pods.stdout.split('\n')}}"
+- debug: msg="{{ pods.stdout.split('\n') }}"
   failed_when: not run_pods_log is success

 - name: Get hostnet pods
-  command: "{{bin_dir}}/kubectl get pods -n test -o
+  command: "{{ bin_dir }}/kubectl get pods -n test -o
   jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
   register: hostnet_pods
   no_log: true

 - name: Get running pods
-  command: "{{bin_dir}}/kubectl get pods -n test -o
+  command: "{{ bin_dir }}/kubectl get pods -n test -o
   jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
   register: running_pods
   no_log: true

 - name: Check kubectl output
-  shell: "{{bin_dir}}/kubectl get pods --all-namespaces -owide"
+  shell: "{{ bin_dir }}/kubectl get pods --all-namespaces -owide"
   register: get_pods
   no_log: true

-- debug: msg="{{get_pods.stdout.split('\n')}}"
+- debug: msg="{{ get_pods.stdout.split('\n') }}"

 - set_fact:
     kube_pods_subnet: 10.233.64.0/18
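
The two command: tasks above use kubectl's jsonpath output so that filtering happens in kubectl itself rather than by parsing table output: ?(.spec.hostNetwork) selects host-network pods and ?(.status.phase=="Running") selects running ones. A smaller sketch of the same idea (task name and register variable invented):

    - name: list only the names of running pods in one namespace
      command: "{{ bin_dir }}/kubectl get pods -n test -o jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {end}'"
      register: running_names
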
@@ -66,30 +66,30 @@
     pod_ips: "{{ (pods.stdout | from_json)['items'] | selectattr('status.podIP', 'defined') | map(attribute = 'status.podIP') | list }}"
     pods_hostnet: |
       {% set list = hostnet_pods.stdout.split(" ") %}
-      {{list}}
+      {{ list }}
     pods_running: |
       {% set list = running_pods.stdout.split(" ") %}
-      {{list}}
+      {{ list }}

 - name: Check pods IP are in correct network
   assert:
     that: item | ipaddr(kube_pods_subnet)
   when: not item in pods_hostnet and item in pods_running
-  with_items: "{{pod_ips}}"
+  with_items: "{{ pod_ips }}"

 - name: Ping between pods is working
-  shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
   with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"

 - name: Ping between hostnet pods is working
-  shell: "{{bin_dir}}/kubectl -n test exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
+  shell: "{{ bin_dir }}/kubectl -n test exec {{ item[0] }} -- ping -c 4 {{ item[1] }}"
   when: item[0] in pods_hostnet and item[1] in pods_hostnet
   with_nested:
-    - "{{pod_names}}"
-    - "{{pod_ips}}"
+    - "{{ pod_names }}"
+    - "{{ pod_ips }}"

 - name: Delete test namespace
-  shell: "{{bin_dir}}/kubectl delete namespace test"
+  shell: "{{ bin_dir }}/kubectl delete namespace test"
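
with_nested expands to the cross product of its lists, so each ping task above runs once per (pod name, pod IP) pair, with the when: guards keeping host-network and cluster-network pods apart. A minimal sketch of the expansion (values invented):

    - name: "cross product demo: runs for (a,1) (a,2) (b,1) (b,2)"
      debug:
        msg: "{{ item[0] }} -> {{ item[1] }}"
      with_nested:
        - ['a', 'b']
        - ['1', '2']
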
@@ -24,8 +24,8 @@
   when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]

 - name: Wait for netchecker server
-  shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep ^netchecker-server"
-  delegate_to: "{{groups['kube-master'][0]}}"
+  shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep ^netchecker-server"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   register: ncs_pod
   until: ncs_pod.stdout.find('Running') != -1
@@ -33,18 +33,18 @@
   delay: 10

 - name: Wait for netchecker agents
-  shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{netcheck_namespace}} | grep '^netchecker-agent-.*Running'"
+  shell: "{{ bin_dir }}/kubectl get pods -o wide --namespace {{ netcheck_namespace }} | grep '^netchecker-agent-.*Running'"
   run_once: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: nca_pod
   until: nca_pod.stdout_lines|length >= groups['k8s-cluster']|intersect(ansible_play_hosts)|length * 2
   retries: 3
   delay: 10
   failed_when: false

-- command: "{{ bin_dir }}/kubectl -n {{netcheck_namespace}} describe pod -l app={{ item }}"
+- command: "{{ bin_dir }}/kubectl -n {{ netcheck_namespace }} describe pod -l app={{ item }}"
   run_once: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   no_log: false
   with_items:
     - netchecker-agent
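
The recurring run_once plus delegate_to combination above executes a task exactly once, on the first kube-master, no matter how many hosts are in the play; the registered result is then shared by all hosts. A stripped-down sketch (the command is illustrative):

    - name: run a cluster-wide query once, from the first master
      command: "{{ bin_dir }}/kubectl get nodes"
      run_once: true
      delegate_to: "{{ groups['kube-master'][0] }}"
      register: nodes_out
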
@@ -56,9 +56,9 @@
   run_once: true

 - name: Get netchecker agents
-  uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/agents/ return_content=yes
+  uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/agents/ return_content=yes
   run_once: true
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   register: agents
   retries: 18
   delay: "{{ agent_report_interval }}"
@@ -77,8 +77,8 @@
   - agents.content[0] == '{'

 - name: Check netchecker status
-  uri: url=http://{{ ansible_default_ipv4.address }}:{{netchecker_port}}/api/v1/connectivity_check status_code=200 return_content=yes
-  delegate_to: "{{groups['kube-master'][0]}}"
+  uri: url=http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check status_code=200 return_content=yes
+  delegate_to: "{{ groups['kube-master'][0] }}"
   run_once: true
   register: result
   retries: 3
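
The uri: tasks above use the inline key=value form, which is why the URL expression sits unquoted inside the module string. An equivalent YAML dict form of the same call would look roughly like this (a sketch only, not a change this commit makes):

    - name: Check netchecker status (dict-form equivalent)
      uri:
        url: "http://{{ ansible_default_ipv4.address }}:{{ netchecker_port }}/api/v1/connectivity_check"
        status_code: 200
        return_content: yes
      delegate_to: "{{ groups['kube-master'][0] }}"
      run_once: true
      register: result
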
@@ -97,13 +97,13 @@
 - command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app=kube-proxy"
   run_once: true
   when: not result is success
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   no_log: false

-- command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{item}} --all-containers"
+- command: "{{ bin_dir }}/kubectl -n kube-system logs -l k8s-app={{ item }} --all-containers"
   run_once: true
   when: not result is success
-  delegate_to: "{{groups['kube-master'][0]}}"
+  delegate_to: "{{ groups['kube-master'][0] }}"
   no_log: false
   with_items:
     - kube-router

@@ -38,7 +38,7 @@
   pre_tasks:
     - name: gather facts from all instances
       setup:
-      delegate_to: "{{item}}"
+      delegate_to: "{{ item }}"
       delegate_facts: True
       with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"

@@ -50,7 +50,7 @@
     - { role: kubernetes/preinstall, tags: preinstall }
     - { role: container-engine, tags: "container-engine", when: deploy_container_engine|default(true) }
     - { role: download, tags: download, when: "not skip_downloads" }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: etcd
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
@@ -76,7 +76,7 @@
     - { role: kubernetes/client, tags: client }
     - { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - name: Upgrade calico on all masters and nodes
   hosts: kube-master:kube-node
@@ -98,7 +98,7 @@
     - { role: kubernetes/node, tags: node }
     - { role: kubernetes/kubeadm, tags: kubeadm }
     - { role: upgrade/post-upgrade, tags: post-upgrade }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: kube-master[0]
   any_errors_fatal: true
@@ -112,14 +112,14 @@
   roles:
     - { role: kubespray-defaults}
     - { role: network_plugin/calico/rr, tags: network }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: kube-master
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
     - { role: kubespray-defaults}
     - { role: kubernetes-apps, tags: apps }
-  environment: "{{proxy_env}}"
+  environment: "{{ proxy_env }}"

 - hosts: k8s-cluster
   any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
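
Each play above attaches environment: "{{ proxy_env }}" at play level, so every task in the play inherits the same proxy settings. The variable is defined elsewhere in the repository; as an assumption-labeled sketch, it is typically a dict along these lines:

    # hypothetical shape of proxy_env, not taken from this diff
    proxy_env:
      http_proxy: "{{ http_proxy | default('') }}"
      https_proxy: "{{ https_proxy | default('') }}"
      no_proxy: "{{ no_proxy | default('') }}"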