Yamllint fixes (#4410)

* Lint everything in the repository with yamllint

* yamllint fixes: syntax fixes only

* yamllint fixes: move comments to play names

* yamllint fixes: indent comments in .gitlab-ci.yml file
This commit is contained in:
MarkusTeufelberger 2019-04-01 11:38:33 +02:00 committed by Kubernetes Prow Robot
parent 483f1d2ca0
commit 9ffc65f8f3
73 changed files with 322 additions and 281 deletions

View file

@ -1,3 +1,4 @@
---
stages:
- unit-tests
- moderator
@ -727,7 +728,7 @@ yamllint:
<<: *job
stage: unit-tests
script:
- yamllint roles
- yamllint .
except: ['triggers', 'master']
tox-inventory-builder:

View file

@ -1 +1,2 @@
---
theme: jekyll-theme-slate

View file

@ -1,3 +1,4 @@
---
apiVersion: "2015-06-15"
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
@ -34,4 +35,3 @@ imageReferenceJson: "{{imageReference|to_json}}"
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"

View file

@ -1,3 +1,4 @@
---
- set_fact:
base_dir: "{{playbook_dir}}/.generated/"

View file

@ -1,2 +1,3 @@
---
# See distro.yaml for supported node_distro images
node_distro: debian

View file

@ -1,3 +1,4 @@
---
distro_settings:
debian: &DEBIAN
image: "debian:9.5"

View file

@ -1,3 +1,4 @@
---
# kubespray-dind.yaml: minimal kubespray ansible playbook usable for DIND
# See contrib/dind/README.md
kube_api_anonymous_auth: true

View file

@ -1,3 +1,4 @@
---
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"

View file

@ -1,3 +1,4 @@
---
- name: set_fact distro_setup
set_fact:
distro_setup: "{{ distro_settings[node_distro] }}"

View file

@ -22,4 +22,3 @@
- hosts: kube-master[0]
roles:
- { role: kubernetes-pv }

View file

@ -79,4 +79,3 @@
src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
state: unmounted
when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

View file

@ -1,2 +1,3 @@
---
dependencies:
- {role: kubernetes-pv/ansible, tags: apps}

View file

@ -1,3 +1,4 @@
---
# Bootstrap heketi
- name: "Get state of heketi service, deployment and pods."
register: "initial_heketi_state"

View file

@ -1,3 +1,4 @@
---
vault_deployment_type: docker
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
vault_version: 0.10.1

View file

@ -4,7 +4,7 @@
# as: "65xxx"
# - router_id: "10.99.0.35"
# as: "65xxx"
#
# loadbalancer_apiserver:
# address: "10.99.0.44"
# port: "8383"

View file

@ -4,7 +4,7 @@
# as: "65xxx"
# - router_id: "10.99.0.3"
# as: "65xxx"
#
# loadbalancer_apiserver:
# address: "10.99.0.21"
# port: "8383"

View file

@ -1,3 +1,4 @@
---
### NOTE: This playbook cannot be used to deploy any new nodes to the cluster.
### Additional information:
### * Will not upgrade etcd
@ -38,8 +39,8 @@
- { role: kubespray-defaults}
- { role: kubernetes/preinstall, tags: preinstall }
#Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
- name: Handle upgrades to master components first to maintain backwards compat.
hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1
roles:
@ -51,8 +52,8 @@
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
- { role: upgrade/post-upgrade, tags: post-upgrade }
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
- name: Finally handle worker upgrades, based on given batch size
hosts: kube-node:!kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:

View file

@ -1,3 +1,4 @@
---
## Directory where etcd data stored
etcd_data_dir: /var/lib/etcd

View file

@ -1,3 +1,4 @@
---
## Uncomment this if you want to force overlay/overlay2 as docker storage driver
## Please note that overlay2 is only supported on newer kernels
# docker_storage_options: -s overlay2

View file

@ -10,19 +10,19 @@
# oci_subnet2_id:
## Override these default/optional behaviors if you wish
# oci_security_list_management: All
# If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
## If you would like the controller to manage specific lists per subnet. This is a mapping of subnet ocids to security list ocids. Below are examples.
# oci_security_lists:
# ocid1.subnet.oc1.phx.aaaaaaaasa53hlkzk6nzksqfccegk2qnkxmphkblst3riclzs4rhwg7rg57q: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# ocid1.subnet.oc1.phx.aaaaaaaahuxrgvs65iwdz7ekwgg3l5gyah7ww5klkwjcso74u3e4i64hvtvq: ocid1.securitylist.oc1.iad.aaaaaaaaqti5jsfvyw6ejahh7r4okb2xbtuiuguswhs746mtahn72r7adt7q
# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
## If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
# oci_use_instance_principals: false
# oci_cloud_controller_version: 0.6.0
# If you would like to control OCI query rate limits for the controller
## If you would like to control OCI query rate limits for the controller
# oci_rate_limit:
# rate_limit_qps_read:
# rate_limit_qps_write:
# rate_limit_bucket_read:
# rate_limit_bucket_write:
# Other optional variables
## Other optional variables
# oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
# oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)

View file

@ -1,3 +1,4 @@
---
# Kubernetes dashboard
# RBAC required. see docs/getting-started.md for access details.
dashboard_enabled: true

View file

@ -1,3 +1,4 @@
---
# Kubernetes configuration dirs and system namespace.
# Those are where all the additional config stuff goes
# the kubernetes normally puts in /srv/kubernetes.

View file

@ -8,4 +8,3 @@
# Whether or not to masquerade traffic to destinations not within
# the pod network.
# canal_masquerade: "true"

View file

@ -1,3 +1,4 @@
---
- hosts: localhost
strategy: linear
vars:

View file

@ -20,8 +20,8 @@
- { role: kubespray-defaults}
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
##Bootstrap any new workers
- hosts: kube-node
- name: Bootstrap any new workers
hosts: kube-node
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
gather_facts: false
vars:
@ -30,22 +30,22 @@
- { role: kubespray-defaults}
- { role: bootstrap-os, tags: bootstrap-os}
##We still have to gather facts about our masters and etcd nodes
- hosts: k8s-cluster:etcd:calico-rr
- name: Gather facts about our masters and etcd nodes
hosts: k8s-cluster:etcd:calico-rr
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
vars:
ansible_ssh_pipelining: true
gather_facts: true
##We need to generate the etcd certificates beforehand
- hosts: etcd
- name: Generate the etcd certificates beforehand
hosts: etcd
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
##Target only workers to get kubelet installed and checking in on any new nodes
- hosts: kube-node
- name: Target only workers to get kubelet installed and checking in on any new nodes
hosts: kube-node
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
roles:
- { role: kubespray-defaults}

View file

@ -1,3 +1,4 @@
---
- hosts: image-builder
gather_facts: false
roles:

View file

@ -1,3 +1,4 @@
---
images_dir: /images/base
docker_user: kubespray+buildvmimages

View file

@ -1,3 +1,4 @@
---
cloud_image: ubuntu-16-04-x64
cloud_region: nyc3
mode: ha

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_machine_type: "n1-standard-1"

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: centos-7
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: coreos-alpha
cloud_region: us-central1-a

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: coreos-stable
cloud_region: us-central1-a

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: coreos-stable
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: coreos-stable
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: coreos-stable
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image: debian-9-kubespray
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: opensuse-leap
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: rhel-7
cloud_region: us-central1-a

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: rhel-7
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: rhel-7
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_machine_type: "n1-standard-1"

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-b
@ -9,4 +10,3 @@ deploy_netchecker: true
enable_network_policy: true
dns_min_replicas: 1
cloud_provider: gce

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-b

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1604-lts
cloud_region: us-central1-c

View file

@ -1,3 +1,4 @@
---
# Instance settings
cloud_image_family: ubuntu-1804-lts
cloud_region: us-central1-a

View file

@ -65,8 +65,8 @@
- { role: kubespray-defaults}
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
#Handle upgrades to master components first to maintain backwards compat.
- hosts: kube-master
- name: Handle upgrades to master components first to maintain backwards compat.
hosts: kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: 1
roles:
@ -79,8 +79,8 @@
- { role: upgrade/post-upgrade, tags: post-upgrade }
environment: "{{proxy_env}}"
#Upgrade calico on all masters and nodes
- hosts: kube-master:kube-node
- name: Upgrade calico on all masters and nodes
hosts: kube-master:kube-node
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles:
@ -89,8 +89,8 @@
- { role: kubernetes-apps/network_plugin, tags: network }
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
#Finally handle worker upgrades, based on given batch size
- hosts: kube-node:!kube-master
- name: Finally handle worker upgrades, based on given batch size
hosts: kube-node:!kube-master
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
serial: "{{ serial | default('20%') }}"
roles: