[2.13] Backport recent bugfixes, mainly the docker-cli issue (#6179)

* Reorder tests in packet file (#6067)

* Skip molecule tests for Ubuntu 18.04 (#6077)

* Fix kubernetes-dashboard template indentation (#6066)

Commit 98e7a07fba updates the dashboard version to 2.0.0, but the
indentation of the enable-skip-login flag wasn't updated with it.
This change fixes that indentation to avoid rendering issues when
dashboard_skip_login is enabled.
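
For reference, the fix boils down to keeping the flag aligned with the other container args in the Jinja2 template; a simplified sketch of the relevant block (argument names taken from the template hunk further below):

```yaml
# Simplified excerpt of the dashboard Deployment template (sketch only).
# The flag must sit at the same indentation level as the other args,
# otherwise the rendered manifest is invalid when dashboard_skip_login is true.
          args:
{% if dashboard_skip_login %}
            - --enable-skip-login
{% endif %}
            - --authentication-mode=token
```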

* Disable OVH CI (#6114)

* Create the namespace when the dashboard deployment uses a customized namespace (#6107)

* Create the namespace when the dashboard deployment uses a customized namespace.

* Fix syntax.
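
In outline, the dashboard template now emits a Namespace object first whenever the dashboard is deployed outside kube-system; a sketch mirroring the template hunk further below:

```yaml
{% if dashboard_namespace != "kube-system" %}
---
apiVersion: v1
kind: Namespace
metadata:
  name: {{ dashboard_namespace }}
  labels:
    name: {{ dashboard_namespace }}
{% endif %}
---
# ...the remaining dashboard resources (Secrets, Deployment, Service) follow as before.
```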

* Fix apiserver port when upgrading (#6136)
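
The upgrade fix replaces the hard-coded port in the "kubeadm | Check api is up" task with the configured kube_apiserver_port; a sketch of the corrected task, matching the hunk further below:

```yaml
- name: kubeadm | Check api is up
  uri:
    url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
    validate_certs: false
  when: inventory_hostname == groups['kube-master']|first
  register: _result
  # retry/until handling is unchanged and omitted from this sketch
```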

* Fix docker fedora packages (#6097)
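
In short, the Fedora vars stop hard-coding fc28 package releases and pin a version that Fedora 30/31 can actually install; a sketch assembled from the Fedora hunk further below (older pins are templated the same way):

```yaml
# Derive the package release from the running Fedora version instead of fc28:
docker_versioned_pkg:
  'latest': docker-ce
  '19.03': docker-ce-19.03.8-3.fc{{ ansible_distribution_major_version }}
# Fedora 30/31 no longer ship docker 18.0x packages, so default to 19.03:
docker_version: "19.03"
docker_cli_version: "19.03"
```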

* Match docker-cli version with docker-engine version (when available)
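
Mechanically, a new docker_cli_version default derives the CLI version from docker_version, and each distro's vars map it to a pinned docker-ce-cli package installed alongside the engine; a sketch assembled from the defaults and Debian/Ubuntu hunks further below:

```yaml
# New default, as in the defaults hunk below:
docker_cli_version: "{{ 'latest' if docker_version != 'latest' and docker_version is version('18.09', '<') else docker_version }}"

# Per-distro mapping to a pinned CLI package (Ubuntu flavour shown):
docker_cli_versioned_pkg:
  'latest': docker-ce-cli
  '18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
  '19.03': docker-ce-cli=5:19.03.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}

# The CLI package is installed next to the engine:
docker_package_info:
  pkg_mgr: apt
  pkgs:
    - name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
      force: yes
    - name: "{{ docker_versioned_pkg[docker_version | string] }}"
      force: yes
```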

* Disable upgrade jobs to allow release 2.13.1 (docker-cli bug)

Co-authored-by: Maxime Guyot <Miouge1@users.noreply.github.com>
Co-authored-by: Victor Morales <electrocucaracha@gmail.com>
Co-authored-by: petruha <5363545+p37ruh4@users.noreply.github.com>
Co-authored-by: Mateus Caruccio <mateus.caruccio@getupcloud.com>
Florian Ruynat 2020-05-22 16:30:37 +02:00 committed by GitHub
parent 01dbc909be
commit 1f4ef0e86e
16 changed files with 228 additions and 178 deletions

View file

@@ -29,51 +29,87 @@ packet_centos7-flannel-containerd-addons-ha:
variables:
MITOGEN_ENABLE: "true"
packet_ubuntu18-crio:
packet_centos7-crio:
extends: .packet
stage: deploy-part2
when: on_success
variables:
MITOGEN_ENABLE: "true"
# ### MANUAL JOBS
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
packet_ubuntu18-crio:
extends: .packet
when: on_success
variables:
UPGRADE_TEST: basic
MITOGEN_ENABLE: "false"
packet_ubuntu16-weave-sep:
stage: deploy-part2
extends: .packet
when: manual
# # More builds for PRs/merges (manual) and triggers (auto)
packet_ubuntu16-canal-sep:
stage: deploy-special
extends: .packet
when: manual
variables:
MITOGEN_ENABLE: "true"
packet_ubuntu16-canal-kubeadm-ha:
stage: deploy-part2
extends: .packet
when: on_success
packet_ubuntu16-canal-sep:
stage: deploy-special
extends: .packet
when: manual
packet_ubuntu16-flannel-ha:
stage: deploy-part2
extends: .packet
when: manual
packet_ubuntu16-kube-router-sep:
stage: deploy-part2
extends: .packet
when: manual
packet_debian10-containerd:
stage: deploy-part2
extends: .packet
when: on_success
variables:
MITOGEN_ENABLE: "true"
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet
when: on_success
services:
- docker:18.09.9-dind
packet_centos8-kube-ovn:
stage: deploy-part2
extends: .packet
when: on_success
packet_centos8-calico:
stage: deploy-part2
extends: .packet
when: on_success
packet_fedora30-weave:
stage: deploy-part2
extends: .packet
when: on_success
packet_opensuse-canal:
stage: deploy-part2
extends: .packet
when: on_success
# Contiv does not work in k8s v1.16
# packet_ubuntu16-contiv-sep:
# stage: deploy-part2
# extends: .packet
# when: on_success
# ### MANUAL JOBS
packet_ubuntu16-weave-sep:
stage: deploy-part2
extends: .packet
when: manual
packet_ubuntu18-cilium-sep:
stage: deploy-special
extends: .packet
@@ -94,50 +130,11 @@ packet_debian9-macvlan:
extends: .packet
when: manual
packet_debian9-calico-upgrade-once:
stage: deploy-part3
extends: .packet
when: on_success
variables:
UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false"
packet_debian10-containerd:
stage: deploy-part2
extends: .packet
when: on_success
variables:
MITOGEN_ENABLE: "true"
packet_centos7-calico-ha:
stage: deploy-part2
extends: .packet
when: manual
packet_centos7-calico-ha-once-localhost:
stage: deploy-part2
extends: .packet
when: on_success
services:
- docker:18.09.9-dind
packet_centos8-kube-ovn:
stage: deploy-part2
extends: .packet
when: on_success
packet_fedora30-weave:
stage: deploy-part2
extends: .packet
when: on_success
packet_fedora31-flannel:
stage: deploy-part2
extends: .packet
when: on_success
variables:
MITOGEN_ENABLE: "true"
packet_centos7-kube-router:
stage: deploy-part2
extends: .packet
@@ -148,25 +145,17 @@ packet_centos7-multus-calico:
extends: .packet
when: manual
packet_centos8-calico:
stage: deploy-part2
extends: .packet
when: on_success
packet_opensuse-canal:
stage: deploy-part2
extends: .packet
when: on_success
packet_oracle7-canal-ha:
stage: deploy-part2
extends: .packet
when: manual
packet_ubuntu16-kube-router-sep:
packet_fedora31-flannel:
stage: deploy-part2
extends: .packet
when: manual
when: on_success
variables:
MITOGEN_ENABLE: "true"
packet_amazon-linux-2-aio:
stage: deploy-part2
@@ -176,10 +165,26 @@ packet_amazon-linux-2-aio:
# ### PR JOBS PART3
# Long jobs (45min+)
packet_centos7-weave-upgrade-ha:
stage: deploy-part3
extends: .packet
when: manual
variables:
UPGRADE_TEST: basic
MITOGEN_ENABLE: "false"
packet_debian9-calico-upgrade:
stage: deploy-part3
extends: .packet
when: on_success
when: manual
variables:
UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false"
packet_debian9-calico-upgrade-once:
stage: deploy-part3
extends: .packet
when: manual
variables:
UPGRADE_TEST: graceful
MITOGEN_ENABLE: "false"

View file

@@ -98,80 +98,80 @@ tf-validate-aws:
# TF_VAR_public_key_path: ""
# TF_VAR_operating_system: ubuntu_18_04
.ovh_variables: &ovh_variables
OS_AUTH_URL: https://auth.cloud.ovh.net/v3
OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
OS_PROJECT_NAME: "9361447987648822"
OS_USER_DOMAIN_NAME: Default
OS_PROJECT_DOMAIN_ID: default
OS_USERNAME: 8XuhBMfkKVrk
OS_REGION_NAME: UK1
OS_INTERFACE: public
OS_IDENTITY_API_VERSION: "3"
# .ovh_variables: &ovh_variables
# OS_AUTH_URL: https://auth.cloud.ovh.net/v3
# OS_PROJECT_ID: 8d3cd5d737d74227ace462dee0b903fe
# OS_PROJECT_NAME: "9361447987648822"
# OS_USER_DOMAIN_NAME: Default
# OS_PROJECT_DOMAIN_ID: default
# OS_USERNAME: 8XuhBMfkKVrk
# OS_REGION_NAME: UK1
# OS_INTERFACE: public
# OS_IDENTITY_API_VERSION: "3"
tf-ovh_cleanup:
stage: unit-tests
tags: [light]
image: python
variables:
<<: *ovh_variables
before_script:
- pip install -r scripts/openstack-cleanup/requirements.txt
script:
- ./scripts/openstack-cleanup/main.py
# tf-ovh_cleanup:
# stage: unit-tests
# tags: [light]
# image: python
# variables:
# <<: *ovh_variables
# before_script:
# - pip install -r scripts/openstack-cleanup/requirements.txt
# script:
# - ./scripts/openstack-cleanup/main.py
tf-ovh_ubuntu18-calico:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.24
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: ubuntu
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
TF_VAR_image: "Ubuntu 18.04"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# tf-ovh_ubuntu18-calico:
# extends: .terraform_apply
# when: on_success
# variables:
# <<: *ovh_variables
# TF_VERSION: 0.12.24
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: ubuntu
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_flavor_k8s_node: "defa64c3-bd46-43b4-858a-d93bbae0a229" # s1-8
# TF_VAR_image: "Ubuntu 18.04"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
tf-ovh_coreos-calico:
extends: .terraform_apply
when: on_success
variables:
<<: *ovh_variables
TF_VERSION: 0.12.24
PROVIDER: openstack
CLUSTER: $CI_COMMIT_REF_NAME
ANSIBLE_TIMEOUT: "60"
SSH_USER: core
TF_VAR_number_of_k8s_masters: "0"
TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
TF_VAR_number_of_etcd: "0"
TF_VAR_number_of_k8s_nodes: "0"
TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
TF_VAR_number_of_bastions: "0"
TF_VAR_number_of_k8s_masters_no_etcd: "0"
TF_VAR_use_neutron: "0"
TF_VAR_floatingip_pool: "Ext-Net"
TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
TF_VAR_network_name: "Ext-Net"
TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
TF_VAR_image: "CoreOS Stable"
TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
# tf-ovh_coreos-calico:
# extends: .terraform_apply
# when: on_success
# variables:
# <<: *ovh_variables
# TF_VERSION: 0.12.24
# PROVIDER: openstack
# CLUSTER: $CI_COMMIT_REF_NAME
# ANSIBLE_TIMEOUT: "60"
# SSH_USER: core
# TF_VAR_number_of_k8s_masters: "0"
# TF_VAR_number_of_k8s_masters_no_floating_ip: "1"
# TF_VAR_number_of_k8s_masters_no_floating_ip_no_etcd: "0"
# TF_VAR_number_of_etcd: "0"
# TF_VAR_number_of_k8s_nodes: "0"
# TF_VAR_number_of_k8s_nodes_no_floating_ip: "1"
# TF_VAR_number_of_gfs_nodes_no_floating_ip: "0"
# TF_VAR_number_of_bastions: "0"
# TF_VAR_number_of_k8s_masters_no_etcd: "0"
# TF_VAR_use_neutron: "0"
# TF_VAR_floatingip_pool: "Ext-Net"
# TF_VAR_external_net: "6011fbc9-4cbf-46a4-8452-6890a340b60b"
# TF_VAR_network_name: "Ext-Net"
# TF_VAR_flavor_k8s_master: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
# TF_VAR_flavor_k8s_node: "4d4fd037-9493-4f2b-9afe-b542b5248eac" # b2-7
# TF_VAR_image: "CoreOS Stable"
# TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'

View file

@@ -9,7 +9,6 @@ To generate this Matrix run `./tests/scripts/md-table/main.py`
amazon | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :white_check_mark: |
centos8 | :white_check_mark: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: |
coreos | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :white_check_mark: |
@@ -25,9 +24,8 @@ ubuntu20 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
| OS / CNI | calico | canal | cilium | contiv | flannel | kube-ovn | kube-router | macvlan | weave |
|---| --- | --- | --- | --- | --- | --- | --- | --- | --- |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
coreos | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
@@ -45,7 +43,6 @@ ubuntu20 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
amazon | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
centos7 | :x: | :x: | :x: | :x: | :white_check_mark: | :x: | :x: | :x: | :x: |
centos8 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
coreos | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian10 | :white_check_mark: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
debian9 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |
fedora30 | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: | :x: |

View file

@@ -7,7 +7,7 @@
2. Add **forked repo** as submodule to desired folder in your existent ansible repo(for example 3d/kubespray):
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
Git will create _.gitmodules_ file in your existent ansible repo:
Git will create `.gitmodules` file in your existent ansible repo:
```ini
[submodule "3d/kubespray"]

View file

@@ -8,12 +8,12 @@ lint:
options:
config-file: ../../../.yamllint
platforms:
- name: ubuntu1804
box: generic/ubuntu1804
cpus: 2
memory: 1024
groups:
- kube-master
# - name: ubuntu1804
# box: generic/ubuntu1804
# cpus: 2
# memory: 1024
# groups:
# - kube-master
- name: centos7
box: centos/7
cpus: 2

View file

@@ -1,5 +1,6 @@
---
docker_version: '18.09'
docker_cli_version: "{{ 'latest' if docker_version != 'latest' and docker_version is version('18.09', '<') else docker_version }}"
docker_selinux_version: '17.03'
docker_package_info:

View file

@@ -16,9 +16,16 @@ docker_versioned_pkg:
'stable': docker-ce=5:18.09.7~3-0~debian-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:19.03.7~3-0~debian-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~debian-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.9~3-0~debian-{{ ansible_distribution_release|lower }}
docker_package_info:
pkg_mgr: apt
pkgs:
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
force: yes
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
force: yes

View file

@@ -2,21 +2,26 @@
docker_kernel_min_version: '0'
# https://docs.docker.com/install/linux/docker-ce/fedora/
# https://download.docker.com/linux/fedora/28/x86_64/stable/Packages/
# https://download.docker.com/linux/fedora/<fedora-version>/x86_64/stable/Packages/
docker_versioned_pkg:
'latest': docker-ce
'18.03': docker-ce-18.03.1.ce-3.fc28
'18.06': docker-ce-18.06.2.ce-3.fc28
'18.09': docker-ce-18.09.7-3.fc28
'18.03': docker-ce-18.03.1.ce-3.fc{{ ansible_distribution_major_version }}
'18.06': docker-ce-18.06.2.ce-3.fc{{ ansible_distribution_major_version }}
'18.09': docker-ce-18.09.7-3.fc{{ ansible_distribution_major_version }}
'19.03': docker-ce-19.03.8-3.fc{{ ansible_distribution_major_version }}
#
# This is due to the fact that the docker
# packages available on Fedora are too recent
#
docker_version: "latest"
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli-19.03.8-3.fc{{ ansible_distribution_major_version }}
'19.03': docker-ce-cli-19.03.9-3.fc{{ ansible_distribution_major_version }}
# Fedora 30/31 don't provide packages for docker 18.0x ...
docker_version: "19.03"
docker_cli_version: "19.03"
docker_package_info:
pkg_mgr: dnf
pkgs:
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
- name: "{{ docker_versioned_pkg[docker_version | string] }}"

View file

@@ -16,6 +16,11 @@ docker_versioned_pkg:
'stable': docker-ce-18.09.9-3.el7
'edge': docker-ce-19.03.8-3.el7
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli-18.09.9-3.el7
'19.03': docker-ce-cli-19.03.9-3.el7
docker_selinux_versioned_pkg:
'latest': docker-ce-selinux-17.03.3.ce-1.el7
'17.03': docker-ce-selinux-17.03.3.ce-1.el7
@@ -30,13 +35,15 @@ docker_pkgs_use_docker_ce:
yum_conf: "{{ docker_yum_conf }}"
docker_pkgs:
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
yum_conf: "{{ docker_yum_conf }}"
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
yum_conf: "{{ docker_yum_conf }}"
docker_package_info:
pkg_mgr: yum
pkgs: |-
{%- if docker_version is version('17.04', '<') -%}
{%- if docker_version != "latest" and docker_version is version('17.04', '<') -%}
{{ docker_pkgs_use_docker_ce }}
{%- else -%}
{{ docker_pkgs }}

View file

@@ -16,9 +16,16 @@ docker_versioned_pkg:
'stable': docker-ce=5:18.09.7~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:19.03.7~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_package_info:
pkg_mgr: apt
pkgs:
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
force: yes
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
force: yes

View file

@@ -12,9 +12,16 @@ docker_versioned_pkg:
'stable': docker-ce=5:18.09.7~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'edge': docker-ce=5:19.03.7~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_cli_versioned_pkg:
'latest': docker-ce-cli
'18.09': docker-ce-cli=5:18.09.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
'19.03': docker-ce-cli=5:19.03.9~3-0~ubuntu-{{ ansible_distribution_release|lower }}
docker_package_info:
pkg_mgr: apt
pkgs:
- name: "{{ docker_cli_versioned_pkg[docker_cli_version | string] }}"
force: yes
- name: "{{ docker_versioned_pkg[docker_version | string] }}"
force: yes

View file

@@ -17,9 +17,18 @@
#
# Example usage: kubectl create -f <this_file>
{% if dashboard_namespace != "kube-system" %}
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ dashboard_namespace }}
labels:
name: {{ dashboard_namespace }}
{% endif %}
---
# ------------------- Dashboard Secrets ------------------- #
apiVersion: v1
kind: Secret
metadata:
@@ -178,7 +187,7 @@ spec:
- --auto-generate-certificates
{% endif %}
{% if dashboard_skip_login %}
- --enable-skip-login
- --enable-skip-login
{% endif %}
- --authentication-mode=token{% if kube_basic_auth|default(false) %},basic{% endif %}
# Uncomment the following line to manually specify Kubernetes API server Host

View file

@@ -1,7 +1,7 @@
---
- name: kubeadm | Check api is up
uri:
url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:6443/healthz"
url: "https://{{ ip | default(fallback_ips[inventory_hostname]) }}:{{ kube_apiserver_port }}/healthz"
validate_certs: false
when: inventory_hostname == groups['kube-master']|first
register: _result

View file

@@ -0,0 +1,14 @@
---
# Instance settings
cloud_image: centos-7
mode: default
# Kubespray settings
deploy_netchecker: true
dns_min_replicas: 1
container_manager: crio
# CRI-O requirements
download_container: false
etcd_deployment_type: host
kubelet_deployment_type: host

View file

@@ -1,5 +0,0 @@
---
dns_min_replicas: 1
deploy_netchecker: true
# resolvconf_mode: host_resolvconf # this is required as long as the coreos stable channel uses docker < 1.12

View file

@@ -1,4 +0,0 @@
---
dns_min_replicas: 1
deploy_netchecker: true
sonobuoy_enabled: true