Yamllint fixes (#4410)
* Lint everything in the repository with yamllint
* yamllint fixes: syntax fixes only
* yamllint fixes: move comments to play names
* yamllint fixes: indent comments in .gitlab-ci.yml file
This commit is contained in: parent 483f1d2ca0, commit 9ffc65f8f3
73 changed files with 322 additions and 281 deletions
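The changes below follow yamllint's default rule set: every touched file gains a leading "---" document-start marker, comments get a space after "#" and are aligned with the block they describe, inline comments get two spaces before "#", and list/mapping indentation is made consistent. A repository-level configuration enforcing exactly those points might look as follows; this is an illustrative sketch only, not the project's actual .yamllint file, and every value in it is an assumption:

# Hypothetical .yamllint sketch -- not part of this commit
---
extends: default

rules:
  document-start:
    present: true                  # require the leading "---" added to most files here
  comments:
    require-starting-space: true   # "# comment", not "#comment"
    min-spaces-from-content: 2     # two spaces before an inline comment
  comments-indentation: enable     # comments aligned with the block they describe
  indentation:
    spaces: 2
  line-length: disable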
@ -1,3 +1,4 @@
---
stages:
- unit-tests
- moderator
@ -8,7 +9,7 @@ stages:
variables:
FAILFASTCI_NAMESPACE: 'kargo-ci'
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-sigs-kubespray'
# DOCKER_HOST: tcp://localhost:2375
# DOCKER_HOST: tcp://localhost:2375
ANSIBLE_FORCE_COLOR: "true"
MAGIC: "ci check this"
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
@ -34,8 +35,8 @@ variables:
# us-west1-a

before_script:
- /usr/bin/python -m pip install -r tests/requirements.txt
- mkdir -p /.ssh
- /usr/bin/python -m pip install -r tests/requirements.txt
- mkdir -p /.ssh

.job: &job
tags:
@ -45,7 +46,7 @@ before_script:

.docker_service: &docker_service
services:
- docker:dind
- docker:dind

.create_cluster: &create_cluster
<<: *job
@ -232,95 +233,95 @@ before_script:

# Test matrix. Leave the comments for markup scripts.
.coreos_calico_aio_variables: &coreos_calico_aio_variables
# stage: deploy-part1
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.ubuntu18_flannel_aio_variables: &ubuntu18_flannel_aio_variables
# stage: deploy-part1
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.centos_weave_kubeadm_variables: &centos_weave_kubeadm_variables
# stage: deploy-part1
# stage: deploy-part1
UPGRADE_TEST: "graceful"

.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
# stage: deploy-part1
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.coreos_cilium_variables: &coreos_cilium_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-part1
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.centos7_flannel_addons_variables: &centos7_flannel_addons_variables
# stage: deploy-part2
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"

.debian9_calico_variables: &debian9_calico_variables
# stage: deploy-part2
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"

.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-part2
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"

.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.centos7_kube_router_variables: &centos7_kube_router_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.centos7_multus_calico_variables: &centos7_multus_calico_variables
# stage: deploy-part2
# stage: deploy-part2
UPGRADE_TEST: "graceful"

.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.coreos_kube_router_variables: &coreos_kube_router_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-part1
# stage: deploy-part1
MOVED_TO_GROUP_VARS: "true"

.ubuntu_flannel_variables: &ubuntu_flannel_variables
# stage: deploy-part2
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"

.ubuntu_kube_router_variables: &ubuntu_kube_router_variables
# stage: deploy-special
# stage: deploy-special
MOVED_TO_GROUP_VARS: "true"

.opensuse_canal_variables: &opensuse_canal_variables
# stage: deploy-part2
# stage: deploy-part2
MOVED_TO_GROUP_VARS: "true"


@ -727,7 +728,7 @@ yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
- yamllint .
except: ['triggers', 'master']

tox-inventory-builder:
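The hunk above is the functional core of the commit: the unit-tests stage job now lints the entire repository instead of only roles/. Reconstructed from the diff (indentation is assumed, since the rendered view strips leading whitespace), the resulting CI job reads roughly:

yamllint:
  <<: *job
  stage: unit-tests
  script:
    - yamllint .
  except: ['triggers', 'master']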
@ -1 +1,2 @@
|
|||
---
|
||||
theme: jekyll-theme-slate
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
apiVersion: "2015-06-15"
|
||||
|
||||
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||
|
@ -34,4 +35,3 @@ imageReferenceJson: "{{imageReference|to_json}}"
|
|||
|
||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- set_fact:
|
||||
base_dir: "{{playbook_dir}}/.generated/"
|
||||
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
---
|
||||
# See distro.yaml for supported node_distro images
|
||||
node_distro: debian
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
distro_settings:
|
||||
debian: &DEBIAN
|
||||
image: "debian:9.5"
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
|
||||
# See contrib/dind/README.md
|
||||
kube_api_anonymous_auth: true
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- name: set_fact distro_setup
|
||||
set_fact:
|
||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||
|
@ -33,7 +34,7 @@
|
|||
# Delete docs
|
||||
path-exclude=/usr/share/doc/*
|
||||
path-include=/usr/share/doc/*/copyright
|
||||
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||
dest: /etc/dpkg/dpkg.cfg.d/01_nodoc
|
||||
when:
|
||||
- ansible_os_family == 'Debian'
|
||||
|
||||
|
@ -55,7 +56,7 @@
|
|||
user:
|
||||
name: "{{ distro_user }}"
|
||||
uid: 1000
|
||||
#groups: sudo
|
||||
# groups: sudo
|
||||
append: yes
|
||||
|
||||
- name: Allow password-less sudo to "{{ distro_user }}"
|
||||
|
|
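Many of the Ansible changes in this file are of the kind shown just above: a comment such as "#groups: sudo" gains a space after the hash. As an illustration with made-up keys and values (not taken from the repository), yamllint's default comment rules reject the first commented line below and accept the second:

# Illustrative snippet only -- hypothetical keys and values
user:
  name: devuser
  uid: 1000
  #groups: sudo    # flagged: no space after '#'
  # groups: sudo   # accepted: space after '#', aligned with the block
  append: yes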
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- name: set_fact distro_setup
|
||||
set_fact:
|
||||
distro_setup: "{{ distro_settings[node_distro] }}"
|
||||
|
@ -18,7 +19,7 @@
|
|||
state: started
|
||||
hostname: "{{ item }}"
|
||||
command: "{{ distro_init }}"
|
||||
#recreate: yes
|
||||
# recreate: yes
|
||||
privileged: true
|
||||
tmpfs:
|
||||
- /sys/module/nf_conntrack/parameters
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
|
||||
- name: Upgrade all packages to the latest version (yum)
|
||||
yum:
|
||||
name: '*'
|
||||
state: latest
|
||||
name: '*'
|
||||
state: latest
|
||||
when: ansible_os_family == "RedHat"
|
||||
|
||||
- name: Install required packages
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
vars:
|
||||
ansible_ssh_pipelining: false
|
||||
roles:
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
- hosts: all
|
||||
gather_facts: true
|
||||
|
@ -22,4 +22,3 @@
|
|||
- hosts: kube-master[0]
|
||||
roles:
|
||||
- { role: kubernetes-pv }
|
||||
|
||||
|
|
|
@ -22,9 +22,9 @@ galaxy_info:
|
|||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
|
|
|
@ -12,5 +12,5 @@
|
|||
- name: Ensure Gluster mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
with_items:
|
||||
- "{{ gluster_mount_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
|
||||
|
|
|
@ -22,9 +22,9 @@ galaxy_info:
|
|||
- wheezy
|
||||
- jessie
|
||||
galaxy_tags:
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
- system
|
||||
- networking
|
||||
- cloud
|
||||
- clustering
|
||||
- files
|
||||
- sharing
|
||||
|
|
|
@ -33,18 +33,18 @@
|
|||
- name: Ensure Gluster brick and mount directories exist.
|
||||
file: "path={{ item }} state=directory mode=0775"
|
||||
with_items:
|
||||
- "{{ gluster_brick_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
- "{{ gluster_brick_dir }}"
|
||||
- "{{ gluster_mount_dir }}"
|
||||
|
||||
- name: Configure Gluster volume.
|
||||
gluster_volume:
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: yes
|
||||
state: present
|
||||
name: "{{ gluster_brick_name }}"
|
||||
brick: "{{ gluster_brick_dir }}"
|
||||
replicas: "{{ groups['gfs-cluster'] | length }}"
|
||||
cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
|
||||
host: "{{ inventory_hostname }}"
|
||||
force: yes
|
||||
run_once: true
|
||||
|
||||
- name: Mount glusterfs to retrieve disk size
|
||||
|
@ -63,13 +63,13 @@
|
|||
|
||||
- name: Set Gluster disk size to variable
|
||||
set_fact:
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||
gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Create file on GlusterFS
|
||||
template:
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
dest: "{{ gluster_mount_dir }}/.test-file.txt"
|
||||
src: test-file.txt
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
- name: Unmount glusterfs
|
||||
|
@ -79,4 +79,3 @@
|
|||
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
|
||||
state: unmounted
|
||||
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
|
||||
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
|
||||
template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
|
||||
with_items:
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||
register: gluster_pv
|
||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||
|
||||
|
|
|
@ -1,2 +1,3 @@
|
|||
---
|
||||
dependencies:
|
||||
- {role: kubernetes-pv/ansible, tags: apps}
|
||||
|
|
|
@ -2,23 +2,23 @@
|
|||
- name: "Load lvm kernel modules"
|
||||
become: true
|
||||
with_items:
|
||||
- "dm_snapshot"
|
||||
- "dm_mirror"
|
||||
- "dm_thin_pool"
|
||||
- "dm_snapshot"
|
||||
- "dm_mirror"
|
||||
- "dm_thin_pool"
|
||||
modprobe:
|
||||
name: "{{ item }}"
|
||||
state: "present"
|
||||
name: "{{ item }}"
|
||||
state: "present"
|
||||
|
||||
- name: "Install glusterfs mount utils (RedHat)"
|
||||
become: true
|
||||
yum:
|
||||
name: "glusterfs-fuse"
|
||||
state: "present"
|
||||
name: "glusterfs-fuse"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: "Install glusterfs mount utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "glusterfs-client"
|
||||
state: "present"
|
||||
name: "glusterfs-client"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'Debian'"
|
||||
|
|
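The hunk above re-indents module arguments and list items that yamllint's indentation rule flagged. Reconstructed from the diff (indentation assumed, since the rendered view strips leading whitespace), the corrected first task reads roughly:

- name: "Load lvm kernel modules"
  become: true
  with_items:
    - "dm_snapshot"
    - "dm_mirror"
    - "dm_thin_pool"
  modprobe:
    name: "{{ item }}"
    state: "present"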
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Bootstrap heketi
|
||||
- name: "Get state of heketi service, deployment and pods."
|
||||
register: "initial_heketi_state"
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get services,deployments,pods --selector=deploy-heketi --output=json"
|
||||
until:
|
||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "initial_heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
|
|
|
@ -38,4 +38,4 @@
|
|||
vars: { volume: "{{ volume_information.stdout|from_json }}" }
|
||||
when: "volume.name == 'heketidbstorage'"
|
||||
- name: "Ensure heketi database volume exists."
|
||||
assert: { that: "heketi_database_volume_created is defined" , msg: "Heketi database volume does not exist." }
|
||||
assert: { that: "heketi_database_volume_created is defined", msg: "Heketi database volume does not exist." }
|
||||
|
|
|
@ -18,8 +18,8 @@
|
|||
deployments_query: "items[?kind=='Deployment'].status.conditions|[0][?type=='Available'].status|[0]"
|
||||
command: "{{ bin_dir }}/kubectl get deployments,pods --selector=glusterfs --output=json"
|
||||
until:
|
||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(pods_query) == 'True'"
|
||||
- "heketi_state.stdout|from_json|json_query(deployments_query) == 'True'"
|
||||
retries: 60
|
||||
delay: 5
|
||||
- set_fact:
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
register: "heketi_service"
|
||||
changed_when: false
|
||||
- name: "Ensure heketi service is available."
|
||||
assert: { that: "heketi_service.stdout != \"\"" }
|
||||
assert: { that: "heketi_service.stdout != \"\"" }
|
||||
- name: "Render storage class configuration."
|
||||
become: true
|
||||
vars:
|
||||
|
|
|
@ -2,15 +2,15 @@
|
|||
- name: "Install lvm utils (RedHat)"
|
||||
become: true
|
||||
yum:
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: "Install lvm utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
name: "lvm2"
|
||||
state: "present"
|
||||
when: "ansible_os_family == 'Debian'"
|
||||
|
||||
- name: "Get volume group information."
|
||||
|
@ -34,13 +34,13 @@
|
|||
- name: "Remove lvm utils (RedHat)"
|
||||
become: true
|
||||
yum:
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
when: "ansible_os_family == 'RedHat'"
|
||||
|
||||
- name: "Remove lvm utils (Debian)"
|
||||
become: true
|
||||
apt:
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
name: "lvm2"
|
||||
state: "absent"
|
||||
when: "ansible_os_family == 'Debian'"
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
vault_deployment_type: docker
|
||||
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
|
||||
vault_version: 0.10.1
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
#---
|
||||
#peers:
|
||||
# -router_id: "10.99.0.34"
|
||||
# as: "65xxx"
|
||||
# - router_id: "10.99.0.35"
|
||||
# as: "65xxx"
|
||||
#
|
||||
#loadbalancer_apiserver:
|
||||
# address: "10.99.0.44"
|
||||
# port: "8383"
|
||||
# ---
|
||||
# peers:
|
||||
# - router_id: "10.99.0.34"
|
||||
# as: "65xxx"
|
||||
# - router_id: "10.99.0.35"
|
||||
# as: "65xxx"
|
||||
|
||||
# loadbalancer_apiserver:
|
||||
# address: "10.99.0.44"
|
||||
# port: "8383"
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
#---
|
||||
#peers:
|
||||
# -router_id: "10.99.0.2"
|
||||
# as: "65xxx"
|
||||
# - router_id: "10.99.0.3"
|
||||
# as: "65xxx"
|
||||
#
|
||||
#loadbalancer_apiserver:
|
||||
# address: "10.99.0.21"
|
||||
# port: "8383"
|
||||
# ---
|
||||
# peers:
|
||||
# - router_id: "10.99.0.2"
|
||||
# as: "65xxx"
|
||||
# - router_id: "10.99.0.3"
|
||||
# as: "65xxx"
|
||||
|
||||
# loadbalancer_apiserver:
|
||||
# address: "10.99.0.21"
|
||||
# port: "8383"
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
|
||||
### Additional information:
|
||||
### * Will not upgrade etcd
|
||||
|
@ -38,8 +39,8 @@
|
|||
- { role: kubespray-defaults}
|
||||
- { role: kubernetes/preinstall, tags: preinstall }
|
||||
|
||||
#Handle upgrades to master components first to maintain backwards compat.
|
||||
- hosts: kube-master
|
||||
- name: Handle upgrades to master components first to maintain backwards compat.
|
||||
hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
serial: 1
|
||||
roles:
|
||||
|
@ -51,8 +52,8 @@
|
|||
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||
|
||||
#Finally handle worker upgrades, based on given batch size
|
||||
- hosts: kube-node:!kube-master
|
||||
- name: Finally handle worker upgrades, based on given batch size
|
||||
hosts: kube-node:!kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
serial: "{{ serial | default('20%') }}"
|
||||
roles:
|
||||
|
|
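This playbook illustrates the "move comments to play names" part of the commit message: a bare comment line above an unnamed play becomes the play's name. Reconstructed from the hunk above (indentation assumed), the worker-upgrade play now begins roughly as:

- name: Finally handle worker upgrades, based on given batch size
  hosts: kube-node:!kube-master
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
  serial: "{{ serial | default('20%') }}"
  roles:
    # role list unchanged from the original play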
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
## Directory where etcd data stored
|
||||
etcd_data_dir: /var/lib/etcd
|
||||
|
||||
|
@ -9,17 +10,17 @@ bin_dir: /usr/local/bin
|
|||
## this node for example. The access_ip is really useful AWS and Google
|
||||
## environments where the nodes are accessed remotely by the "public" ip,
|
||||
## but don't know about that address themselves.
|
||||
#access_ip: 1.1.1.1
|
||||
# access_ip: 1.1.1.1
|
||||
|
||||
|
||||
## External LB example config
|
||||
## apiserver_loadbalancer_domain_name: "elb.some.domain"
|
||||
#loadbalancer_apiserver:
|
||||
# address: 1.2.3.4
|
||||
# port: 1234
|
||||
# loadbalancer_apiserver:
|
||||
# address: 1.2.3.4
|
||||
# port: 1234
|
||||
|
||||
## Internal loadbalancers for apiservers
|
||||
#loadbalancer_apiserver_localhost: true
|
||||
# loadbalancer_apiserver_localhost: true
|
||||
|
||||
## Local loadbalancer should use this port
|
||||
## And must be set port 6443
|
||||
|
@ -32,12 +33,12 @@ nginx_kube_apiserver_healthcheck_port: 8081
|
|||
## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
|
||||
## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
|
||||
## modules.
|
||||
#kubelet_load_modules: false
|
||||
# kubelet_load_modules: false
|
||||
|
||||
## Upstream dns servers used by dnsmasq
|
||||
#upstream_dns_servers:
|
||||
# - 8.8.8.8
|
||||
# - 8.8.4.4
|
||||
# upstream_dns_servers:
|
||||
# - 8.8.8.8
|
||||
# - 8.8.4.4
|
||||
|
||||
## There are some changes specific to the cloud providers
|
||||
## for instance we need to encapsulate packets with some network plugins
|
||||
|
@ -46,43 +47,43 @@ nginx_kube_apiserver_healthcheck_port: 8081
|
|||
## like you would do when using openstack-client before starting the playbook.
|
||||
## Note: The 'external' cloud provider is not supported.
|
||||
## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
|
||||
#cloud_provider:
|
||||
# cloud_provider:
|
||||
|
||||
## Set these proxy values in order to update package manager and docker daemon to use proxies
|
||||
#http_proxy: ""
|
||||
#https_proxy: ""
|
||||
# http_proxy: ""
|
||||
# https_proxy: ""
|
||||
|
||||
## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
|
||||
#no_proxy: ""
|
||||
# no_proxy: ""
|
||||
|
||||
## Some problems may occur when downloading files over https proxy due to ansible bug
|
||||
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
|
||||
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
|
||||
#download_validate_certs: False
|
||||
# download_validate_certs: False
|
||||
|
||||
## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
|
||||
#additional_no_proxy: ""
|
||||
# additional_no_proxy: ""
|
||||
|
||||
## Certificate Management
|
||||
## This setting determines whether certs are generated via scripts.
|
||||
## Chose 'none' if you provide your own certificates.
|
||||
## Option is "script", "none"
|
||||
## note: vault is removed
|
||||
#cert_management: script
|
||||
# cert_management: script
|
||||
|
||||
## Set to true to allow pre-checks to fail and continue deployment
|
||||
#ignore_assert_errors: false
|
||||
# ignore_assert_errors: false
|
||||
|
||||
## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
|
||||
#kube_read_only_port: 10255
|
||||
# kube_read_only_port: 10255
|
||||
|
||||
## Set true to download and cache container
|
||||
#download_container: true
|
||||
# download_container: true
|
||||
|
||||
## Deploy container engine
|
||||
# Set false if you want to deploy container engine manually.
|
||||
#deploy_container_engine: true
|
||||
# deploy_container_engine: true
|
||||
|
||||
## Set Pypi repo and cert accordingly
|
||||
#pyrepo_index: https://pypi.example.com/simple
|
||||
#pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
|
||||
# pyrepo_index: https://pypi.example.com/simple
|
||||
# pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
|
||||
|
|
|
@ -1,14 +1,14 @@
|
|||
## When azure is used, you need to also set the following variables.
|
||||
## see docs/azure.md for details on how to get these values
|
||||
|
||||
#azure_tenant_id:
|
||||
#azure_subscription_id:
|
||||
#azure_aad_client_id:
|
||||
#azure_aad_client_secret:
|
||||
#azure_resource_group:
|
||||
#azure_location:
|
||||
#azure_subnet_name:
|
||||
#azure_security_group_name:
|
||||
#azure_vnet_name:
|
||||
#azure_vnet_resource_group:
|
||||
#azure_route_table_name:
|
||||
# azure_tenant_id:
|
||||
# azure_subscription_id:
|
||||
# azure_aad_client_id:
|
||||
# azure_aad_client_secret:
|
||||
# azure_resource_group:
|
||||
# azure_location:
|
||||
# azure_subnet_name:
|
||||
# azure_security_group_name:
|
||||
# azure_vnet_name:
|
||||
# azure_vnet_resource_group:
|
||||
# azure_route_table_name:
|
||||
|
|
|
@ -1,2 +1,2 @@
|
|||
## Does coreos need auto upgrade, default is true
|
||||
#coreos_auto_upgrade: true
|
||||
# coreos_auto_upgrade: true
|
||||
|
|
|
@ -1,13 +1,14 @@
|
|||
---
|
||||
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
|
||||
## Please note that overlay2 is only supported on newer kernels
|
||||
#docker_storage_options: -s overlay2
|
||||
# docker_storage_options: -s overlay2
|
||||
|
||||
## Enable docker_container_storage_setup, it will configure devicemapper driver on Centos7 or RedHat7.
|
||||
docker_container_storage_setup: false
|
||||
|
||||
## It must be define a disk path for docker_container_storage_setup_devs.
|
||||
## Otherwise docker-storage-setup will be executed incorrectly.
|
||||
#docker_container_storage_setup_devs: /dev/vdb
|
||||
# docker_container_storage_setup_devs: /dev/vdb
|
||||
|
||||
## Uncomment this if you have more than 3 nameservers, then we'll only use the first 3.
|
||||
docker_dns_servers_strict: false
|
||||
|
@ -32,12 +33,12 @@ docker_rpm_keepcache: 0
|
|||
## An obvious use case is allowing insecure-registry access to self hosted registries.
|
||||
## Can be ipaddress and domain_name.
|
||||
## example define 172.19.16.11 or mirror.registry.io
|
||||
#docker_insecure_registries:
|
||||
# docker_insecure_registries:
|
||||
# - mirror.registry.io
|
||||
# - 172.19.16.11
|
||||
|
||||
## Add other registry,example China registry mirror.
|
||||
#docker_registry_mirrors:
|
||||
# docker_registry_mirrors:
|
||||
# - https://registry.docker-cn.com
|
||||
# - https://mirror.aliyuncs.com
|
||||
|
||||
|
@ -46,7 +47,7 @@ docker_rpm_keepcache: 0
|
|||
## or private, which control whether mounts in the file system
|
||||
## namespace set up for docker will receive or propagate mounts
|
||||
## and unmounts. Leave empty for system default
|
||||
#docker_mount_flags:
|
||||
# docker_mount_flags:
|
||||
|
||||
## A string of extra options to pass to the docker daemon.
|
||||
## This string should be exactly as you wish it to appear.
|
||||
|
|
|
@ -1,28 +1,28 @@
|
|||
## When Oracle Cloud Infrastructure is used, set these variables
|
||||
#oci_private_key:
|
||||
#oci_region_id:
|
||||
#oci_tenancy_id:
|
||||
#oci_user_id:
|
||||
#oci_user_fingerprint:
|
||||
#oci_compartment_id:
|
||||
#oci_vnc_id:
|
||||
#oci_subnet1_id:
|
||||
#oci_subnet2_id:
|
||||
# oci_private_key:
|
||||
# oci_region_id:
|
||||
# oci_tenancy_id:
|
||||
# oci_user_id:
|
||||
# oci_user_fingerprint:
|
||||
# oci_compartment_id:
|
||||
# oci_vnc_id:
|
||||
# oci_subnet1_id:
|
||||
# oci_subnet2_id:
|
||||
## Overide these default/optional behaviors if you wish
|
||||
#oci_security_list_management: All
|
||||
# If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
|
||||
#oci_security_lists:
|
||||
#ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
#ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
|
||||
#oci_use_instance_principals: false
|
||||
#oci_cloud_controller_version: 0.6.0
|
||||
# If you would like to control OCI query rate limits for the controller
|
||||
#oci_rate_limit:
|
||||
#rate_limit_qps_read:
|
||||
#rate_limit_qps_write:
|
||||
#rate_limit_bucket_read:
|
||||
#rate_limit_bucket_write:
|
||||
# Other optional variables
|
||||
#oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
|
||||
#oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
|
||||
# oci_security_list_management: All
|
||||
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
|
||||
# oci_security_lists:
|
||||
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
|
||||
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
|
||||
# oci_use_instance_principals: false
|
||||
# oci_cloud_controller_version: 0.6.0
|
||||
## If you would like to control OCI query rate limits for the controller
|
||||
# oci_rate_limit:
|
||||
# rate_limit_qps_read:
|
||||
# rate_limit_qps_write:
|
||||
# rate_limit_bucket_read:
|
||||
# rate_limit_bucket_write:
|
||||
## Other optional variables
|
||||
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
|
||||
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)
|
||||
|
|
|
@ -1,16 +1,16 @@
|
|||
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
#openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||
#openstack_blockstorage_ignore_volume_az: yes
|
||||
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||
#openstack_lbaas_enabled: True
|
||||
#openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||
## To enable automatic floating ip provisioning, specify a subnet.
|
||||
#openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||
## Override default LBaaS behavior
|
||||
#openstack_lbaas_use_octavia: False
|
||||
#openstack_lbaas_method: "ROUND_ROBIN"
|
||||
#openstack_lbaas_provider: "haproxy"
|
||||
#openstack_lbaas_create_monitor: "yes"
|
||||
#openstack_lbaas_monitor_delay: "1m"
|
||||
#openstack_lbaas_monitor_timeout: "30s"
|
||||
#openstack_lbaas_monitor_max_retries: "3"
|
||||
# # When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
|
||||
# openstack_blockstorage_version: "v1/v2/auto (default)"
|
||||
# openstack_blockstorage_ignore_volume_az: yes
|
||||
# # When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
|
||||
# openstack_lbaas_enabled: True
|
||||
# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP"
|
||||
# # To enable automatic floating ip provisioning, specify a subnet.
|
||||
# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default"
|
||||
# # Override default LBaaS behavior
|
||||
# openstack_lbaas_use_octavia: False
|
||||
# openstack_lbaas_method: "ROUND_ROBIN"
|
||||
# openstack_lbaas_provider: "haproxy"
|
||||
# openstack_lbaas_create_monitor: "yes"
|
||||
# openstack_lbaas_monitor_delay: "1m"
|
||||
# openstack_lbaas_monitor_timeout: "30s"
|
||||
# openstack_lbaas_monitor_max_retries: "3"
|
||||
|
|
|
@ -1,18 +1,18 @@
|
|||
## Etcd auto compaction retention for mvcc key value store in hour
|
||||
#etcd_compaction_retention: 0
|
||||
# etcd_compaction_retention: 0
|
||||
|
||||
## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics.
|
||||
#etcd_metrics: basic
|
||||
# etcd_metrics: basic
|
||||
|
||||
## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing.
|
||||
## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM.
|
||||
#etcd_memory_limit: "512M"
|
||||
# etcd_memory_limit: "512M"
|
||||
|
||||
## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than
|
||||
## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check
|
||||
## etcd documentation for more information.
|
||||
#etcd_quota_backend_bytes: "2G"
|
||||
# etcd_quota_backend_bytes: "2G"
|
||||
|
||||
### ETCD: disable peer client cert authentication.
|
||||
# This affects ETCD_PEER_CLIENT_CERT_AUTH variable
|
||||
#etcd_peer_client_auth: true
|
||||
# etcd_peer_client_auth: true
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Kubernetes dashboard
|
||||
# RBAC required. see docs/getting-started.md for access details.
|
||||
dashboard_enabled: true
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Kubernetes configuration dirs and system namespace.
|
||||
# Those are where all the additional config stuff goes
|
||||
# the kubernetes normally puts in /srv/kubernetes.
|
||||
|
@ -51,9 +52,9 @@ kube_users:
|
|||
- system:masters
|
||||
|
||||
## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
|
||||
#kube_oidc_auth: false
|
||||
#kube_basic_auth: false
|
||||
#kube_token_auth: false
|
||||
# kube_oidc_auth: false
|
||||
# kube_basic_auth: false
|
||||
# kube_token_auth: false
|
||||
|
||||
|
||||
## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
|
||||
|
@ -91,10 +92,10 @@ kube_network_node_prefix: 24
|
|||
|
||||
# The port the API Server will be listening on.
|
||||
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||
kube_apiserver_port: 6443 # (https)
|
||||
#kube_apiserver_insecure_port: 8080 # (http)
|
||||
kube_apiserver_port: 6443 # (https)
|
||||
# kube_apiserver_insecure_port: 8080 # (http)
|
||||
# Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
|
||||
kube_apiserver_insecure_port: 0 # (disabled)
|
||||
kube_apiserver_insecure_port: 0 # (disabled)
|
||||
|
||||
# Kube-proxy proxyMode configuration.
|
||||
# Can be ipvs, iptables
|
||||
|
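The hunk above adjusts inline comments such as "# (https)" and "# (disabled)" so that at least two spaces separate a value from its trailing comment, which is what yamllint's default comments rule expects. A hypothetical illustration (keys and values invented for this example):

bad_port: 6443 # flagged: only one space before the inline comment
good_port: 6443  # accepted: two spaces before the inline comment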
@ -112,11 +113,11 @@ kube_proxy_nodeport_addresses: >-
|
|||
{%- endif -%}
|
||||
|
||||
# If non-empty, will use this string as identification instead of the actual hostname
|
||||
#kube_override_hostname: >-
|
||||
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
|
||||
# {%- else -%}
|
||||
# {{ inventory_hostname }}
|
||||
# {%- endif -%}
|
||||
# kube_override_hostname: >-
|
||||
# {%- if cloud_provider is defined and cloud_provider in [ 'aws' ] -%}
|
||||
# {%- else -%}
|
||||
# {{ inventory_hostname }}
|
||||
# {%- endif -%}
|
||||
|
||||
## Encrypting Secret Data at Rest (experimental)
|
||||
kube_encrypt_secret_data: false
|
||||
|
@ -129,7 +130,7 @@ ndots: 2
|
|||
# Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
|
||||
dns_mode: coredns
|
||||
# Set manual server if using a custom cluster DNS server
|
||||
#manual_dns_server: 10.x.x.x
|
||||
# manual_dns_server: 10.x.x.x
|
||||
# Enable nodelocal dns cache
|
||||
enable_nodelocaldns: False
|
||||
nodelocaldns_ip: 169.254.25.10
|
||||
|
@ -163,7 +164,7 @@ kubernetes_audit: false
|
|||
dynamic_kubelet_configuration: false
|
||||
|
||||
# define kubelet config dir for dynamic kubelet
|
||||
#kubelet_config_dir:
|
||||
# kubelet_config_dir:
|
||||
default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
|
||||
dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
|
||||
|
||||
|
@ -177,8 +178,8 @@ podsecuritypolicy_enabled: false
|
|||
|
||||
# dnsmasq
|
||||
# dnsmasq_upstream_dns_servers:
|
||||
# - /resolvethiszone.with/10.0.4.250
|
||||
# - 8.8.8.8
|
||||
# - /resolvethiszone.with/10.0.4.250
|
||||
# - 8.8.8.8
|
||||
|
||||
# Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
|
||||
# kubelet_cgroups_per_qos: true
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
## With calico it is possible to distributed routes with border routers of the datacenter.
|
||||
## Warning : enabling router peering will disable calico's default behavior ('node mesh').
|
||||
## The subnets of each nodes will be distributed by the datacenter router
|
||||
#peer_with_router: false
|
||||
# peer_with_router: false
|
||||
|
||||
# Enables Internet connectivity from containers
|
||||
# nat_outgoing: true
|
||||
|
|
|
@ -8,4 +8,3 @@
|
|||
# Whether or not to masquerade traffic to destinations not within
|
||||
# the pod network.
|
||||
# canal_masquerade: "true"
|
||||
|
||||
|
|
|
@ -6,9 +6,9 @@
|
|||
## With contiv, L3 BGP mode is possible by setting contiv_fwd_mode to "routing".
|
||||
## In this case, you may need to peer with an uplink
|
||||
## NB: The hostvars must contain a key "contiv" of which value is a dict containing "router_ip", "as"(defaults to contiv_global_as), "neighbor_as" (defaults to contiv_global_neighbor_as), "neighbor"
|
||||
#contiv_peer_with_uplink_leaf: false
|
||||
#contiv_global_as: "65002"
|
||||
#contiv_global_neighbor_as: "500"
|
||||
# contiv_peer_with_uplink_leaf: false
|
||||
# contiv_global_as: "65002"
|
||||
# contiv_global_neighbor_as: "500"
|
||||
|
||||
# Fabric mode: aci, aci-opflex or default
|
||||
# contiv_fabric_mode: default
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
- hosts: localhost
|
||||
strategy: linear
|
||||
vars:
|
||||
|
|
scale.yml
|
@ -20,8 +20,8 @@
|
|||
- { role: kubespray-defaults}
|
||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||
|
||||
##Bootstrap any new workers
|
||||
- hosts: kube-node
|
||||
- name: Bootstrap any new workers
|
||||
hosts: kube-node
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
gather_facts: false
|
||||
vars:
|
||||
|
@ -30,22 +30,22 @@
|
|||
- { role: kubespray-defaults}
|
||||
- { role: bootstrap-os, tags: bootstrap-os}
|
||||
|
||||
##We still have to gather facts about our masters and etcd nodes
|
||||
- hosts: k8s-cluster:etcd:calico-rr
|
||||
- name: Gather facts about our masters and etcd nodes
|
||||
hosts: k8s-cluster:etcd:calico-rr
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
vars:
|
||||
ansible_ssh_pipelining: true
|
||||
gather_facts: true
|
||||
|
||||
##We need to generate the etcd certificates beforhand
|
||||
- hosts: etcd
|
||||
- name: Generate the etcd certificates beforehand
|
||||
hosts: etcd
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
|
||||
##Target only workers to get kubelet installed and checking in on any new nodes
|
||||
- hosts: kube-node
|
||||
- name: Target only workers to get kubelet installed and checking in on any new nodes
|
||||
hosts: kube-node
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
roles:
|
||||
- { role: kubespray-defaults}
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
- hosts: image-builder
|
||||
gather_facts: false
|
||||
roles:
|
||||
- kubevirt-images
|
||||
---
|
||||
- hosts: image-builder
|
||||
gather_facts: false
|
||||
roles:
|
||||
- kubevirt-images
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
images_dir: /images/base
|
||||
|
||||
docker_user: kubespray+buildvmimages
|
||||
|
@ -5,32 +6,32 @@ docker_host: quay.io
|
|||
registry: quay.io/kubespray
|
||||
|
||||
images:
|
||||
ubuntu-1604:
|
||||
filename: xenial-server-cloudimg-amd64-disk1.img
|
||||
url: https://storage.googleapis.com/kubespray-images/ubuntu/xenial-server-cloudimg-amd64-disk1.img
|
||||
checksum: sha256:c0d099383cd064390b568e20d1c39a9c68ba864764404b70f754a7b1b2f808f7
|
||||
converted: false
|
||||
ubuntu-1604:
|
||||
filename: xenial-server-cloudimg-amd64-disk1.img
|
||||
url: https://storage.googleapis.com/kubespray-images/ubuntu/xenial-server-cloudimg-amd64-disk1.img
|
||||
checksum: sha256:c0d099383cd064390b568e20d1c39a9c68ba864764404b70f754a7b1b2f808f7
|
||||
converted: false
|
||||
|
||||
ubuntu-1804:
|
||||
filename: bionic-server-cloudimg-amd64.img
|
||||
url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
|
||||
checksum: sha256:c3d0e03f4245ffaabd7647e6dabf346b944a62b9934d0a89f3a04b4236386af2
|
||||
converted: false
|
||||
ubuntu-1804:
|
||||
filename: bionic-server-cloudimg-amd64.img
|
||||
url: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
|
||||
checksum: sha256:c3d0e03f4245ffaabd7647e6dabf346b944a62b9934d0a89f3a04b4236386af2
|
||||
converted: false
|
||||
|
||||
fedora-28:
|
||||
filename: Fedora-Cloud-Base-28-1.1.x86_64.qcow2
|
||||
url: https://mirror.netsite.dk/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-Base-28-1.1.x86_64.qcow2
|
||||
checksum: sha256:d987209719fadaf81b8bff85c5d3590a1d3dac6357e4838fde8357086c49b5b4
|
||||
converted: true
|
||||
fedora-28:
|
||||
filename: Fedora-Cloud-Base-28-1.1.x86_64.qcow2
|
||||
url: https://mirror.netsite.dk/fedora/linux/releases/28/Cloud/x86_64/images/Fedora-Cloud-Base-28-1.1.x86_64.qcow2
|
||||
checksum: sha256:d987209719fadaf81b8bff85c5d3590a1d3dac6357e4838fde8357086c49b5b4
|
||||
converted: true
|
||||
|
||||
centos-7:
|
||||
filename: CentOS-7-x86_64-GenericCloud-1809.qcow2
|
||||
url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2
|
||||
checksum: sha256:42c062df8a8c36991ec0282009dd52ac488461a3f7ee114fc21a765bfc2671c2
|
||||
converted: true
|
||||
centos-7:
|
||||
filename: CentOS-7-x86_64-GenericCloud-1809.qcow2
|
||||
url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2
|
||||
checksum: sha256:42c062df8a8c36991ec0282009dd52ac488461a3f7ee114fc21a765bfc2671c2
|
||||
converted: true
|
||||
|
||||
debian-9:
|
||||
filename: debian-9-openstack-amd64.qcow2
|
||||
url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
|
||||
checksum: sha256:01d9345ba7a6523d214d2eaabe07fe7b4b69b28e63d7a6b322521e99e5768719
|
||||
converted: true
|
||||
debian-9:
|
||||
filename: debian-9-openstack-amd64.qcow2
|
||||
url: https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2
|
||||
checksum: sha256:01d9345ba7a6523d214d2eaabe07fe7b4b69b28e63d7a6b322521e99e5768719
|
||||
converted: true
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
cloud_image: ubuntu-16-04-x64
|
||||
cloud_region: nyc3
|
||||
mode: ha
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_machine_type: "n1-standard-1"
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_region: us-central1-c
|
||||
|
@ -15,7 +16,7 @@ deploy_netchecker: true
|
|||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
kube_encrypt_secret_data: true
|
||||
#ingress_nginx_enabled: true
|
||||
# ingress_nginx_enabled: true
|
||||
cert_manager_enabled: true
|
||||
metrics_server_enabled: true
|
||||
kube_token_auth: true
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: centos-7
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: coreos-alpha
|
||||
cloud_region: us-central1-a
|
||||
|
@ -7,7 +8,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
|||
|
||||
# Deployment settings
|
||||
kube_network_plugin: weave
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
|
|
|
@ -1,15 +1,16 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: coreos-stable
|
||||
cloud_region: us-central1-a
|
||||
cloud_machine_type: "n1-standard-2"
|
||||
mode: aio
|
||||
##user-data to simply turn off coreos upgrades
|
||||
## user-data to simply turn off coreos upgrades
|
||||
startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
||||
|
||||
# Deployment settings
|
||||
no_group_vars: true
|
||||
kube_network_plugin: calico
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: coreos-stable
|
||||
cloud_region: us-central1-c
|
||||
|
@ -6,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
|||
|
||||
# Deployment settings
|
||||
kube_network_plugin: canal
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: coreos-stable
|
||||
cloud_region: us-central1-c
|
||||
|
@ -6,7 +7,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
|||
|
||||
# Deployment settings
|
||||
kube_network_plugin: cilium
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
deploy_netchecker: true
|
||||
enable_network_policy: true
|
||||
dns_min_replicas: 1
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: coreos-stable
|
||||
cloud_region: us-central1-c
|
||||
|
@ -8,7 +9,7 @@ startup_script: 'systemctl disable locksmithd && systemctl stop locksmithd'
|
|||
# Deployment settings
|
||||
kube_network_plugin: kube-router
|
||||
bootstrap_os: coreos
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image: debian-9-kubespray
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: opensuse-leap
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: rhel-7
|
||||
cloud_region: us-central1-a
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: rhel-7
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: rhel-7
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_machine_type: "n1-standard-1"
|
||||
cloud_region: us-central1-c
|
||||
mode: ha
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-b
|
||||
|
@ -9,4 +10,3 @@ deploy_netchecker: true
|
|||
enable_network_policy: true
|
||||
dns_min_replicas: 1
|
||||
cloud_provider: gce
|
||||
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-b
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1604-lts
|
||||
cloud_region: us-central1-c
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
---
|
||||
# Instance settings
|
||||
cloud_image_family: ubuntu-1804-lts
|
||||
cloud_region: us-central1-a
|
||||
|
|
|
@ -17,9 +17,9 @@
|
|||
shell: "{{bin_dir}}/kubectl get pods -n test"
|
||||
register: pods
|
||||
until:
|
||||
- '"ContainerCreating" not in pods.stdout'
|
||||
- '"Pending" not in pods.stdout'
|
||||
- '"Terminating" not in pods.stdout'
|
||||
- '"ContainerCreating" not in pods.stdout'
|
||||
- '"Pending" not in pods.stdout'
|
||||
- '"Terminating" not in pods.stdout'
|
||||
retries: 60
|
||||
delay: 2
|
||||
no_log: true
|
||||
|
@ -69,12 +69,12 @@
|
|||
shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
|
||||
when: not item[0] in pods_hostnet and not item[1] in pods_hostnet
|
||||
with_nested:
|
||||
- "{{pod_names}}"
|
||||
- "{{pod_ips}}"
|
||||
- "{{pod_names}}"
|
||||
- "{{pod_ips}}"
|
||||
|
||||
- name: Ping between hostnet pods is working
|
||||
shell: "{{bin_dir}}/kubectl exec {{item[0]}} -- ping -c 4 {{ item[1] }}"
|
||||
when: item[0] in pods_hostnet and item[1] in pods_hostnet
|
||||
with_nested:
|
||||
- "{{pod_names}}"
|
||||
- "{{pod_ips}}"
|
||||
- "{{pod_names}}"
|
||||
- "{{pod_ips}}"
|
||||
|
|
|
@ -65,8 +65,8 @@
|
|||
- { role: kubespray-defaults}
|
||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||
|
||||
#Handle upgrades to master components first to maintain backwards compat.
|
||||
- hosts: kube-master
|
||||
- name: Handle upgrades to master components first to maintain backwards compat.
|
||||
hosts: kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
serial: 1
|
||||
roles:
|
||||
|
@ -79,8 +79,8 @@
|
|||
- { role: upgrade/post-upgrade, tags: post-upgrade }
|
||||
environment: "{{proxy_env}}"
|
||||
|
||||
#Upgrade calico on all masters and nodes
|
||||
- hosts: kube-master:kube-node
|
||||
- name: Upgrade calico on all masters and nodes
|
||||
hosts: kube-master:kube-node
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
serial: "{{ serial | default('20%') }}"
|
||||
roles:
|
||||
|
@ -89,8 +89,8 @@
|
|||
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||
|
||||
#Finally handle worker upgrades, based on given batch size
|
||||
- hosts: kube-node:!kube-master
|
||||
- name: Finally handle worker upgrades, based on given batch size
|
||||
hosts: kube-node:!kube-master
|
||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||
serial: "{{ serial | default('20%') }}"
|
||||
roles:
|
||||
|
|