Add .editorconfig file (#6307)

parent 09b23f96d7
commit 4c1e0b188d

67 changed files with 232 additions and 217 deletions

Note: this commit is almost entirely whitespace normalization (trailing whitespace stripped, continuation-line indentation aligned), so in the hunks below many removed (-) and added (+) lines read identically once the invisible whitespace is lost in plain text.

.editorconfig — new file, 15 lines
@@ -0,0 +1,15 @@
+ root = true
+
+ [*.{yaml,yml,yml.j2,yaml.j2}]
+ indent_style = space
+ indent_size = 2
+ trim_trailing_whitespace = true
+ insert_final_newline = true
+ charset = utf-8
+
+ [{Dockerfile}]
+ indent_style = space
+ indent_size = 2
+ trim_trailing_whitespace = true
+ insert_final_newline = true
+ charset = utf-8
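For illustration only (not part of the commit): a small YAML fragment that satisfies the [*.{yaml,yml,yml.j2,yaml.j2}] section above — two-space indentation, UTF-8, no trailing whitespace, one final newline. The file name and keys are made up.

# sample.yml — hypothetical file conforming to the rules above
some_map:
  key: value        # indent_style = space, indent_size = 2
  nested_list:
    - item-one
    - item-two
# no trailing whitespace anywhere; the file ends with exactly one newline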
@@ -171,4 +171,4 @@ tf-elastx_ubuntu18-calico:
  TF_VAR_flavor_k8s_master: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
  TF_VAR_flavor_k8s_node: 3f73fc93-ec61-4808-88df-2580d94c1a9b # v1-standard-2
  TF_VAR_image: ubuntu-18.04-server-latest
- TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
+ TF_VAR_k8s_allowed_remote_ips: '["0.0.0.0/0"]'
@@ -51,4 +51,4 @@ vagrant_ubuntu18-weave-medium:
  vagrant_ubuntu20-flannel:
  stage: deploy-part2
  extends: .vagrant
- when: on_success
+ when: on_success
Dockerfile — 12 changed lines
@@ -6,12 +6,12 @@ RUN apt update -y && \
  apt install -y \
  libssl-dev python3-dev sshpass apt-transport-https jq moreutils \
  ca-certificates curl gnupg2 software-properties-common python3-pip rsync
- RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
- add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) \
- stable" \
- && apt update -y && apt-get install docker-ce -y
+ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
+ add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable" \
+ && apt update -y && apt-get install docker-ce -y
  COPY . .
  RUN /usr/bin/python3 -m pip install pip -U && /usr/bin/python3 -m pip install -r tests/requirements.txt && python3 -m pip install -r requirements.txt && update-alternatives --install /usr/bin/python python /usr/bin/python3 1
  RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.17.5/bin/linux/amd64/kubectl \
@@ -1,2 +1,2 @@
  ---
- theme: jekyll-theme-slate
+ theme: jekyll-theme-slate
@@ -8,7 +8,7 @@
  {% for host in groups['gfs-cluster'] %}
  {
  "addresses": [
- {
+ {
  "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
  }
  ],
@@ -1,7 +1,7 @@
  apiVersion: v1
  kind: PersistentVolume
  metadata:
- name: glusterfs
+ name: glusterfs
  spec:
  capacity:
  storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
@@ -6,11 +6,11 @@

  - name: bootstrap/start_vault_temp | Start single node Vault with file backend
  command: >
- docker run -d --cap-add=IPC_LOCK --name {{ vault_temp_container_name }}
- -p {{ vault_port }}:{{ vault_port }}
- -e 'VAULT_LOCAL_CONFIG={{ vault_temp_config|to_json }}'
- -v /etc/vault:/etc/vault
- {{ vault_image_repo }}:{{ vault_version }} server
+ docker run -d --cap-add=IPC_LOCK --name {{ vault_temp_container_name }}
+ -p {{ vault_port }}:{{ vault_port }}
+ -e 'VAULT_LOCAL_CONFIG={{ vault_temp_config|to_json }}'
+ -v /etc/vault:/etc/vault
+ {{ vault_image_repo }}:{{ vault_version }} server

  - name: bootstrap/start_vault_temp | Start again single node Vault with file backend
  command: docker start {{ vault_temp_container_name }}
@@ -21,9 +21,9 @@
  - name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized
  pause:
  prompt: >
- Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
- 'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
- needed for many vault orchestration steps.
+ Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
+ 'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
+ needed for many vault orchestration steps.
  when: vault_cluster_is_initialized and not vault_secrets_available

  - name: bootstrap/sync_secrets | Cat root_token from a vault host
@@ -25,6 +25,6 @@
  - name: check_etcd | Fail if etcd is not available and needed
  fail:
  msg: >
- Unable to start Vault cluster! Etcd is not available at
- {{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
+ Unable to start Vault cluster! Etcd is not available at
+ {{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
  when: vault_etcd_needed|d() and not vault_etcd_available
@@ -46,7 +46,7 @@
  set_fact:
  vault_cluster_is_initialized: >-
  {{ vault_is_initialized or
- hostvars[item]['vault_is_initialized'] or
- ('value' in vault_etcd_exists.stdout|default('')) }}
+ hostvars[item]['vault_is_initialized'] or
+ ('value' in vault_etcd_exists.stdout|default('')) }}
  with_items: "{{ groups.vault }}"
  run_once: true
@@ -6,9 +6,9 @@
  ca_cert: "{{ vault_cert_dir }}/ca.pem"
  name: "{{ create_role_name }}"
  rules: >-
- {%- if create_role_policy_rules|d("default") == "default" -%}
- {{
- { 'path': {
+ {%- if create_role_policy_rules|d("default") == "default" -%}
+ {{
+ { 'path': {
  create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'},
  create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'}
  }} | to_json + '\n'
@@ -24,13 +24,13 @@
  ca_cert: "{{ vault_cert_dir }}/ca.pem"
  secret: "{{ create_role_mount_path }}/roles/{{ create_role_name }}"
  data: |
- {%- if create_role_options|d("default") == "default" -%}
- {
- allow_any_name: true
- }
- {%- else -%}
- {{ create_role_options | to_json }}
- {%- endif -%}
+ {%- if create_role_options|d("default") == "default" -%}
+ {
+ allow_any_name: true
+ }
+ {%- else -%}
+ {{ create_role_options | to_json }}
+ {%- endif -%}

  ## Userpass based auth method

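The two hunks above lean on Jinja whitespace control: the {%- ... -%} markers strip the newlines and indentation that a YAML block scalar would otherwise keep, so the template renders a compact value. A stripped-down sketch of the same pattern — the task and variable names here are hypothetical, not from the repo:

- name: Render a compact JSON value from an indented template  # hypothetical
  set_fact:
    role_options_json: >-
      {%- if use_default_options | default(true) -%}
      {{ {'allow_any_name': true} | to_json }}
      {%- else -%}
      {{ custom_role_options | to_json }}
      {%- endif -%}

With the default branch taken, this renders to the single line {"allow_any_name": true}.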
@@ -18,8 +18,8 @@
  - name: shared/gen_userpass | Copy credentials to all hosts in the group
  copy:
  content: >
- {{
- {'username': gen_userpass_username,
- 'password': gen_userpass_password} | to_nice_json(indent=4)
- }}
+ {{
+ {'username': gen_userpass_username,
+ 'password': gen_userpass_password} | to_nice_json(indent=4)
+ }}
  dest: "{{ vault_roles_dir }}/{{ gen_userpass_role }}/userpass"
@@ -1,2 +1,2 @@
  [Service]
- Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
+ Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
@@ -19,4 +19,4 @@
  # etcd_peer_client_auth: true

  ## Settings for etcd deployment type
- etcd_deployment_type: docker
+ etcd_deployment_type: docker
@@ -1,3 +1,3 @@
- Package: {{ containerd_package }}
- Pin: version {{ containerd_version }}*
- Pin-Priority: 1001
+ Package: {{ containerd_package }}
+ Pin: version {{ containerd_version }}*
+ Pin-Priority: 1001
@@ -15,8 +15,8 @@ containerd_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb {{ containerd_debian_repo_base_url }}
- {{ ansible_distribution_release|lower }}
- {{ containerd_debian_repo_component }}
+ deb {{ containerd_debian_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ {{ containerd_debian_repo_component }}

  runc_binary: /usr/bin/runc
@@ -15,8 +15,8 @@ containerd_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb {{ containerd_ubuntu_repo_base_url }}
- {{ ansible_distribution_release|lower }}
- {{ containerd_ubuntu_repo_component }}
+ deb {{ containerd_ubuntu_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ {{ containerd_ubuntu_repo_component }}

  runc_binary: /usr/bin/runc
@@ -41,9 +41,9 @@ docker_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb {{ docker_debian_repo_base_url }}
- {{ ansible_distribution_release|lower }}
- stable
+ deb {{ docker_debian_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ stable

  dockerproject_repo_key_info:
  pkg_key: apt_key
@@ -55,6 +55,6 @@ dockerproject_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb {{ docker_debian_repo_base_url }}
- {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
- main
+ deb {{ docker_debian_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
@@ -41,9 +41,9 @@ docker_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
- {{ ansible_distribution_release|lower }}
- stable
+ deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ stable

  dockerproject_repo_key_info:
  pkg_key: apt_key
@@ -55,6 +55,6 @@ dockerproject_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
- {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
- main
+ deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
@@ -37,9 +37,9 @@ docker_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
- {{ ansible_distribution_release|lower }}
- stable
+ deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
+ {{ ansible_distribution_release|lower }}
+ stable

  dockerproject_repo_key_info:
  pkg_key: apt_key
@@ -51,6 +51,6 @@ dockerproject_repo_info:
  pkg_repo: apt_repository
  repos:
  - >
- deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
- {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
- main
+ deb [arch={{ host_architecture }}] {{ docker_ubuntu_repo_base_url }}
+ {{ ansible_distribution|lower }}-{{ ansible_distribution_release|lower }}
+ main
@@ -30,14 +30,14 @@
  with_items: "{{ expected_files }}"
  vars:
  expected_files: >-
- ['{{ etcd_cert_dir }}/ca.pem',
- {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
- {% for host in all_etcd_hosts %}
- '{{ etcd_cert_dir }}/node-{{ host }}-key.pem',
- '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem',
- '{{ etcd_cert_dir }}/member-{{ host }}-key.pem'
- {% if not loop.last %}{{','}}{% endif %}
- {% endfor %}]
+ ['{{ etcd_cert_dir }}/ca.pem',
+ {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
+ {% for host in all_etcd_hosts %}
+ '{{ etcd_cert_dir }}/node-{{ host }}-key.pem',
+ '{{ etcd_cert_dir }}/admin-{{ host }}-key.pem',
+ '{{ etcd_cert_dir }}/member-{{ host }}-key.pem'
+ {% if not loop.last %}{{','}}{% endif %}
+ {% endfor %}]

  - name: "Check_certs | Set 'gen_master_certs' to true"
  set_fact:
@@ -111,9 +111,9 @@

  - name: Gen_certs | Set cert names per node
  set_fact:
- my_etcd_node_certs: ['ca.pem',
- 'node-{{ inventory_hostname }}.pem',
- 'node-{{ inventory_hostname }}-key.pem']
+ my_etcd_node_certs: [ 'ca.pem',
+ 'node-{{ inventory_hostname }}.pem',
+ 'node-{{ inventory_hostname }}-key.pem']
  tags:
  - facts

@@ -1,9 +1,9 @@
  ---
  - name: Install | Copy etcdctl binary from docker container
  command: sh -c "{{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy;
- {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
- {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcdctl {{ bin_dir }}/etcdctl &&
- {{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy"
+ {{ docker_bin_dir }}/docker create --name etcdctl-binarycopy {{ etcd_image_repo }}:{{ etcd_image_tag }} &&
+ {{ docker_bin_dir }}/docker cp etcdctl-binarycopy:/usr/local/bin/etcdctl {{ bin_dir }}/etcdctl &&
+ {{ docker_bin_dir }}/docker rm -f etcdctl-binarycopy"
  register: etcdctl_install_result
  until: etcdctl_install_result.rc == 0
  retries: "{{ etcd_retries }}"
@@ -18,14 +18,14 @@ auth:
  useInstancePrincipals: true
  {% else %}
  useInstancePrincipals: false


  region: {{ oci_region_id }}
  tenancy: {{ oci_tenancy_id }}
  user: {{ oci_user_id }}
- key: |
+ key: |
  {{ oci_private_key }}

- {% if oci_private_key_passphrase is defined %}
+ {% if oci_private_key_passphrase is defined %}
  passphrase: {{ oci_private_key_passphrase }}
  {% endif %}
@@ -75,16 +75,16 @@ loadBalancer:
  # Optional rate limit controls for accessing OCI API
  rateLimiter:
  {% if oci_rate_limit.rate_limit_qps_read %}
- rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }}
+ rateLimitQPSRead: {{ oci_rate_limit.rate_limit_qps_read }}
  {% endif %}
  {% if oci_rate_limit.rate_limit_qps_write %}
- rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }}
+ rateLimitQPSWrite: {{ oci_rate_limit.rate_limit_qps_write }}
  {% endif %}
  {% if oci_rate_limit.rate_limit_bucket_read %}
- rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }}
+ rateLimitBucketRead: {{ oci_rate_limit.rate_limit_bucket_read }}
  {% endif %}
  {% if oci_rate_limit.rate_limit_bucket_write %}
- rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }}
+ rateLimitBucketWrite: {{ oci_rate_limit.rate_limit_bucket_write }}
  {% endif %}
  {% endif %}

@@ -17,4 +17,4 @@ rules:
  - nodes/spec
  - nodes/metrics
  verbs:
- - "*"
+ - "*"
@@ -14,4 +14,4 @@ cinder_cacert: "{{ lookup('env','OS_CACERT') }}"

  # For now, only Cinder v3 is supported in Cinder CSI driver
  cinder_blockstorage_version: "v3"
- cinder_csi_controller_replicas: 1
+ cinder_csi_controller_replicas: 1
@@ -8,7 +8,7 @@ metadata:
  namespace: kube-system

  ---
- # external attacher
+ # external attacher
  kind: ClusterRole
  apiVersion: rbac.authorization.k8s.io/v1
  metadata:
@@ -197,4 +197,4 @@ roleRef:
  subjects:
  - kind: ServiceAccount
  name: csi-gce-pd-node-sa
- namespace: kube-system
+ namespace: kube-system
@@ -6,4 +6,4 @@ local_path_provisioner_reclaim_policy: Delete
  local_path_provisioner_claim_root: /opt/local-path-provisioner/
  local_path_provisioner_is_default_storageclass: "true"
  local_path_provisioner_debug: false
- local_path_provisioner_helper_image_tag: "latest"
+ local_path_provisioner_helper_image_tag: "latest"
@@ -2,4 +2,4 @@
  apiVersion: v1
  kind: Namespace
  metadata:
- name: {{ local_path_provisioner_namespace }}
+ name: {{ local_path_provisioner_namespace }}
@@ -3,4 +3,4 @@ apiVersion: v1
  kind: ServiceAccount
  metadata:
  name: local-path-provisioner-service-account
- namespace: {{ local_path_provisioner_namespace }}
+ namespace: {{ local_path_provisioner_namespace }}
@@ -10,4 +10,4 @@ rules:
  verbs: ["list", "create", "get", "update", "watch", "patch"]
  - apiGroups: ["", "extensions"]
  resources: ["nodes", "pods", "secrets", "services", "namespaces"]
- verbs: ["get", "list", "watch"]
+ verbs: ["get", "list", "watch"]
@@ -33,7 +33,7 @@ spec:
  # Limit the namespace where this ALB Ingress Controller deployment will
  # resolve ingress resources. If left commented, all namespaces are used.
  #- --watch-namespace=your-k8s-namespace
-
+
  # Setting the ingress-class flag below will ensure that only ingress resources with the
  # annotation kubernetes.io/ingress.class: "alb" are respected by the controller. You may
  # choose any class you'd like for this controller to respect.
@@ -42,7 +42,7 @@ spec:
  # by the ALB Ingress Controller, providing distinction between
  # clusters.
  - --cluster-name={{ cluster_name }}
-
+
  # Enables logging on all outbound requests sent to the AWS API.
  # If logging is desired, set to true.
  # - ---aws-api-debug
@@ -71,4 +71,4 @@ spec:
  terminationGracePeriodSeconds: 30
  {% if rbac_enabled %}
  serviceAccountName: alb-ingress
- {% endif %}
+ {% endif %}
@@ -20,4 +20,4 @@ spec:
  shortNames:
  - cert
  - certs
-
+
@@ -325,32 +325,32 @@ apiVersion: kubeproxy.config.k8s.io/v1alpha1
  kind: KubeProxyConfiguration
  bindAddress: {{ kube_proxy_bind_address }}
  clientConnection:
- acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
- burst: {{ kube_proxy_client_burst }}
- contentType: {{ kube_proxy_client_content_type }}
- kubeconfig: {{ kube_proxy_client_kubeconfig }}
- qps: {{ kube_proxy_client_qps }}
+ acceptContentTypes: {{ kube_proxy_client_accept_content_types }}
+ burst: {{ kube_proxy_client_burst }}
+ contentType: {{ kube_proxy_client_content_type }}
+ kubeconfig: {{ kube_proxy_client_kubeconfig }}
+ qps: {{ kube_proxy_client_qps }}
  clusterCIDR: {{ kube_pods_subnet }}
  configSyncPeriod: {{ kube_proxy_config_sync_period }}
  conntrack:
- maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
- min: {{ kube_proxy_conntrack_min }}
- tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
- tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
+ maxPerCore: {{ kube_proxy_conntrack_max_per_core }}
+ min: {{ kube_proxy_conntrack_min }}
+ tcpCloseWaitTimeout: {{ kube_proxy_conntrack_tcp_close_wait_timeout }}
+ tcpEstablishedTimeout: {{ kube_proxy_conntrack_tcp_established_timeout }}
  enableProfiling: {{ kube_proxy_enable_profiling }}
  healthzBindAddress: {{ kube_proxy_healthz_bind_address }}
  hostnameOverride: {{ kube_override_hostname }}
  iptables:
- masqueradeAll: {{ kube_proxy_masquerade_all }}
- masqueradeBit: {{ kube_proxy_masquerade_bit }}
- minSyncPeriod: {{ kube_proxy_min_sync_period }}
- syncPeriod: {{ kube_proxy_sync_period }}
+ masqueradeAll: {{ kube_proxy_masquerade_all }}
+ masqueradeBit: {{ kube_proxy_masquerade_bit }}
+ minSyncPeriod: {{ kube_proxy_min_sync_period }}
+ syncPeriod: {{ kube_proxy_sync_period }}
  ipvs:
- excludeCIDRs: {{ kube_proxy_exclude_cidrs }}
- minSyncPeriod: {{ kube_proxy_min_sync_period }}
- scheduler: {{ kube_proxy_scheduler }}
- syncPeriod: {{ kube_proxy_sync_period }}
- strictARP: {{ kube_proxy_strict_arp }}
+ excludeCIDRs: {{ kube_proxy_exclude_cidrs }}
+ minSyncPeriod: {{ kube_proxy_min_sync_period }}
+ scheduler: {{ kube_proxy_scheduler }}
+ syncPeriod: {{ kube_proxy_sync_period }}
+ strictARP: {{ kube_proxy_strict_arp }}
  metricsBindAddress: {{ kube_proxy_metrics_bind_address }}
  mode: {{ kube_proxy_mode }}
  nodePortAddresses: {{ kube_proxy_nodeport_addresses }}
@@ -14,4 +14,4 @@ contexts:
  - context:
  cluster: webhook-token-auth-cluster
  user: webhook-token-auth-user
- name: webhook-token-auth
+ name: webhook-token-auth
@@ -40,7 +40,7 @@

  - name: Set label to node
  command: >-
- {{ bin_dir }}/kubectl label node {{ inventory_hostname }} {{ item }} --overwrite=true
+ {{ bin_dir }}/kubectl label node {{ inventory_hostname }} {{ item }} --overwrite=true
  loop: "{{ role_node_labels + inventory_node_labels }}"
  delegate_to: "{{ groups['kube-master'][0] }}"
  changed_when: false
@@ -44,8 +44,8 @@
  - name: Hosts | Extract existing entries for localhost from hosts file
  set_fact:
  etc_hosts_localhosts_dict: >-
- {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%}
- {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }}
+ {%- set splitted = (item | regex_replace('[ \t]+', ' ')|regex_replace('#.*$')|trim).split( ' ') -%}
+ {{ etc_hosts_localhosts_dict|default({}) | combine({splitted[0]: splitted[1::] }) }}
  with_items: "{{ (etc_hosts_content['content'] | b64decode).splitlines() }}"
  when:
  - etc_hosts_content.content is defined
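To make the filter chain above concrete, here is how one hosts line would flow through it (the input line is a hypothetical example):

# input item:  "127.0.0.1   localhost localhost4   # loopback"
# regex_replace('[ \t]+', ' ')  -> "127.0.0.1 localhost localhost4 # loopback"
# regex_replace('#.*$') | trim  -> "127.0.0.1 localhost localhost4"
# .split(' ')                   -> ['127.0.0.1', 'localhost', 'localhost4']
# combine({splitted[0]: splitted[1::]}) then yields:
#   {'127.0.0.1': ['localhost', 'localhost4']}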
@@ -27,9 +27,9 @@
  sync_tokens: >-
  {%- set tokens = {'sync': False} -%}
  {%- for server in groups['kube-master'] | intersect(ansible_play_batch)
- if (not hostvars[server].known_tokens.stat.exists) or
- (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
- {%- set _ = tokens.update({'sync': True}) -%}
+ if (not hostvars[server].known_tokens.stat.exists) or
+ (hostvars[server].known_tokens.stat.checksum|default('') != known_tokens_master.stat.checksum|default('')) -%}
+ {%- set _ = tokens.update({'sync': True}) -%}
  {%- endfor -%}
  {{ tokens.sync }}
  run_once: true
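A note on the pattern above: a plain {% set %} inside a Jinja for-loop is scoped to that loop, so the template smuggles the result out through a mutable dict instead. A stripped-down sketch of the same trick, with hypothetical names:

flag_is_set: >-
  {%- set state = {'found': False} -%}
  {%- for item in [1, 2, 3] if item == 2 -%}
  {%- set _ = state.update({'found': True}) -%}
  {%- endfor -%}
  {{ state.found }}

state.update() mutates the dict created before the loop, so the final {{ state.found }} sees the change; sync_tokens uses the same mechanism via tokens.update().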
@@ -434,13 +434,13 @@ loadbalancer_apiserver_type: "nginx"
  apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
  kube_apiserver_endpoint: |-
  {% if loadbalancer_apiserver is defined -%}
- https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
+ https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port|default(kube_apiserver_port) }}
  {%- elif not is_kube_master and loadbalancer_apiserver_localhost -%}
- https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
+ https://localhost:{{ loadbalancer_apiserver_port|default(kube_apiserver_port) }}
  {%- elif is_kube_master -%}
- https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
+ https://{{ kube_apiserver_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_port }}
  {%- else -%}
- https://{{ first_kube_master }}:{{ kube_apiserver_port }}
+ https://{{ first_kube_master }}:{{ kube_apiserver_port }}
  {%- endif %}
  kube_apiserver_insecure_endpoint: >-
  http://{{ kube_apiserver_insecure_bind_address | regex_replace('0\.0\.0\.0','127.0.0.1') }}:{{ kube_apiserver_insecure_port }}
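For context, one possible rendering of the kube_apiserver_endpoint block above, under assumed values is_kube_master=true, kube_apiserver_bind_address=0.0.0.0 and kube_apiserver_port=6443 (hypothetical, not defaults asserted by this diff) — the regex_replace swaps the wildcard bind address for loopback:

# hypothetical rendered result for the is_kube_master branch
kube_apiserver_endpoint: https://127.0.0.1:6443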
@@ -198,11 +198,11 @@
  "apiVersion": "projectcalico.org/v3",
  "kind": "BGPPeer",
  "metadata": {
- "name": "global-{{ item.router_id }}"
+ "name": "global-{{ item.router_id }}"
  },
  "spec": {
- "asNumber": "{{ item.as }}",
- "peerIP": "{{ item.router_id }}"
+ "asNumber": "{{ item.as }}",
+ "peerIP": "{{ item.router_id }}"
  }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
@@ -220,11 +220,11 @@
  "apiVersion": "projectcalico.org/v3",
  "kind": "BGPPeer",
  "metadata": {
- "name": "peer-to-rrs"
+ "name": "peer-to-rrs"
  },
  "spec": {
- "nodeSelector": "!has(i-am-a-route-reflector)",
- "peerSelector": "has(i-am-a-route-reflector)"
+ "nodeSelector": "!has(i-am-a-route-reflector)",
+ "peerSelector": "has(i-am-a-route-reflector)"
  }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
@@ -242,11 +242,11 @@
  "apiVersion": "projectcalico.org/v3",
  "kind": "BGPPeer",
  "metadata": {
- "name": "rr-mesh"
+ "name": "rr-mesh"
  },
  "spec": {
- "nodeSelector": "has(i-am-a-route-reflector)",
- "peerSelector": "has(i-am-a-route-reflector)"
+ "nodeSelector": "has(i-am-a-route-reflector)",
+ "peerSelector": "has(i-am-a-route-reflector)"
  }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
@@ -315,13 +315,13 @@
  "apiVersion": "projectcalico.org/v3",
  "kind": "Node",
  "metadata": {
- "name": "{{ inventory_hostname }}"
+ "name": "{{ inventory_hostname }}"
  },
  "spec": {
- "bgp": {
- "asNumber": "{{ local_as }}"
- },
- "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}]
+ "bgp": {
+ "asNumber": "{{ local_as }}"
+ },
+ "orchRefs":[{"nodeName":"{{ inventory_hostname }}","orchestrator":"k8s"}]
  }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
@@ -339,12 +339,12 @@
  "apiVersion": "projectcalico.org/v3",
  "kind": "BGPPeer",
  "metadata": {
- "name": "{{ inventory_hostname }}-{{ item.router_id }}"
+ "name": "{{ inventory_hostname }}-{{ item.router_id }}"
  },
  "spec": {
- "asNumber": "{{ item.as }}",
- "node": "{{ inventory_hostname }}",
- "peerIP": "{{ item.router_id }}"
+ "asNumber": "{{ item.as }}",
+ "node": "{{ inventory_hostname }}",
+ "peerIP": "{{ item.router_id }}"
  }}' | {{ bin_dir }}/calicoctl.sh apply -f -
  register: output
  retries: 4
@@ -368,10 +368,10 @@ spec:
  secret:
  secretName: typha-client
  items:
- - key: tls.crt
- path: typha-client.crt
- - key: tls.key
- path: typha-client.key
+ - key: tls.crt
+ path: typha-client.crt
+ - key: tls.key
+ path: typha-client.key
  - name: typha-cacert
  hostPath:
  path: "/etc/kubernetes/ssl/"
@@ -145,17 +145,17 @@ spec:
  periodSeconds: 10
  {% if typha_secure %}
  volumes:
- - name: typha-server
- secret:
- secretName: typha-server
- items:
- - key: tls.crt
- path: server_certificate.pem
- - key: tls.key
- path: server_key.pem
- - name: cacert
- hostPath:
- path: "{{ kube_cert_dir }}"
+ - name: typha-server
+ secret:
+ secretName: typha-server
+ items:
+ - key: tls.crt
+ path: server_certificate.pem
+ - key: tls.key
+ path: server_key.pem
+ - name: cacert
+ hostPath:
+ path: "{{ kube_cert_dir }}"
  {% endif %}

  ---
@@ -3,7 +3,7 @@
  apiVersion: apiextensions.k8s.io/v1beta1
  kind: CustomResourceDefinition
  metadata:
- name: felixconfigurations.crd.projectcalico.org
+ name: felixconfigurations.crd.projectcalico.org
  spec:
  scope: Cluster
  group: crd.projectcalico.org
@@ -146,9 +146,9 @@

  - name: Contiv | Copy netctl binary from docker container
  command: sh -c "{{ docker_bin_dir }}/docker rm -f netctl-binarycopy;
- {{ docker_bin_dir }}/docker create --name netctl-binarycopy {{ contiv_image_repo }}:{{ contiv_image_tag }} &&
- {{ docker_bin_dir }}/docker cp netctl-binarycopy:/contiv/bin/netctl {{ bin_dir }}/netctl &&
- {{ docker_bin_dir }}/docker rm -f netctl-binarycopy"
+ {{ docker_bin_dir }}/docker create --name netctl-binarycopy {{ contiv_image_repo }}:{{ contiv_image_tag }} &&
+ {{ docker_bin_dir }}/docker cp netctl-binarycopy:/contiv/bin/netctl {{ bin_dir }}/netctl &&
+ {{ docker_bin_dir }}/docker rm -f netctl-binarycopy"
  register: contiv_task_result
  until: contiv_task_result.rc == 0
  retries: 4
@@ -29,15 +29,15 @@ spec:
  securityContext:
  privileged: true
  volumeMounts:
- - mountPath: /etc/openvswitch
- name: etc-openvswitch
- readOnly: false
- - mountPath: /var/run
- name: var-run
- readOnly: false
- - mountPath: /opt/cni/bin
- name: cni-bin-dir
- readOnly: false
+ - mountPath: /etc/openvswitch
+ name: etc-openvswitch
+ readOnly: false
+ - mountPath: /var/run
+ name: var-run
+ readOnly: false
+ - mountPath: /opt/cni/bin
+ name: cni-bin-dir
+ readOnly: false
  readinessProbe:
  exec:
  command:
@@ -22,10 +22,10 @@ data:
  }
  contiv_k8s_config: |-
  {
- "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
- "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
- "K8S_KEY": "",
- "K8S_CERT": "",
- "K8S_TOKEN": "",
- "SVC_SUBNET": "{{ kube_service_addresses }}"
+ "K8S_API_SERVER": "{{ kube_apiserver_endpoint_for_contiv }}",
+ "K8S_CA": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
+ "K8S_KEY": "",
+ "K8S_CERT": "",
+ "K8S_TOKEN": "",
+ "SVC_SUBNET": "{{ kube_service_addresses }}"
  }
@@ -20,12 +20,12 @@ spec:
  dnsPolicy: ClusterFirstWithHostNet
  hostPID: true
  affinity:
- nodeAffinity:
- requiredDuringSchedulingIgnoredDuringExecution:
- nodeSelectorTerms:
- - matchExpressions:
- - key: node-role.kubernetes.io/master
- operator: DoesNotExist
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: DoesNotExist
  containers:
  - name: contiv-etcd-proxy
  image: {{ contiv_etcd_image_repo }}:{{ contiv_etcd_image_tag }}
@@ -78,8 +78,8 @@ spec:
  value: kubernetes
  - name: CONTIV_NETPLUGIN_VTEP_IP
  valueFrom:
- fieldRef:
- fieldPath: status.podIP
+ fieldRef:
+ fieldPath: status.podIP
  - name: CONTIV_NETPLUGIN_ETCD_ENDPOINTS
  valueFrom:
  configMapKeyRef:
@@ -57,15 +57,15 @@ spec:
  name: contiv-config
  key: contiv_ovs_vswitchd_extra_flags
  volumeMounts:
- - mountPath: /etc/openvswitch
- name: etc-openvswitch
- readOnly: false
- - mountPath: /lib/modules
- name: lib-modules
- readOnly: true
- - mountPath: /var/run
- name: var-run
- readOnly: false
+ - mountPath: /etc/openvswitch
+ name: etc-openvswitch
+ readOnly: false
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /var/run
+ name: var-run
+ readOnly: false
  volumes:
  # Used by contiv-ovs
  - name: etc-openvswitch
@@ -13,4 +13,4 @@ kube_ovn_pinger_cpu_limit: 200m
  kube_ovn_pinger_memory_limit: 400Mi

  traffic_mirror: true
- encap_checksum: true
+ encap_checksum: true
|
|||
JSONPath: .spec.providerInterfaceName
|
||||
- name: Subnet
|
||||
type: string
|
||||
JSONPath: .spec.subnet
|
||||
JSONPath: .spec.subnet
|
||||
|
|
|
@ -349,4 +349,4 @@ spec:
|
|||
path: /var/log/openvswitch
|
||||
- name: host-log-ovn
|
||||
hostPath:
|
||||
path: /var/log/ovn
|
||||
path: /var/log/ovn
|
||||
|
|
|
@ -57,4 +57,4 @@ kube_router_enable_metrics: false
|
|||
kube_router_metrics_path: /metrics
|
||||
|
||||
# Prometheus metrics port to use
|
||||
kube_router_metrics_port: 9255
|
||||
kube_router_metrics_port: 9255
|
||||
|
|
|
@ -25,4 +25,4 @@ rules:
|
|||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- update
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
# Node NotReady: type = ready, status = Unknown
|
||||
- name: See if node is in ready state
|
||||
shell: >-
|
||||
{{ bin_dir }}/kubectl get node {{ inventory_hostname }}
|
||||
-o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
|
||||
{{ bin_dir }}/kubectl get node {{ inventory_hostname }}
|
||||
-o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
|
||||
register: kubectl_node_ready
|
||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||
failed_when: false
|
||||
|
@ -14,8 +14,8 @@
|
|||
# else unschedulable key doesn't exist
|
||||
- name: See if node is schedulable
|
||||
shell: >-
|
||||
{{ bin_dir }}/kubectl get node {{ inventory_hostname }}
|
||||
-o jsonpath='{ .spec.unschedulable }'
|
||||
{{ bin_dir }}/kubectl get node {{ inventory_hostname }}
|
||||
-o jsonpath='{ .spec.unschedulable }'
|
||||
register: kubectl_node_schedulable
|
||||
delegate_to: "{{ groups['kube-master'][0] }}"
|
||||
failed_when: false
|
||||
|
|
|
@ -70,4 +70,4 @@ images:
|
|||
filename: openSUSE-Leap-15.1-OpenStack.x86_64-0.0.4-Build6.106.qcow2
|
||||
url: https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.1/images/openSUSE-Leap-15.1-OpenStack.x86_64-0.0.4-Build6.106.qcow2
|
||||
checksum: sha256:e3c016a889505c5ae51dafe6eedc836a9e9546ab951fdc96f07eb35e34d12b8c
|
||||
converted: true
|
||||
converted: true
|
||||
|
|
|
@ -10,4 +10,4 @@ kubernetes_audit: true
|
|||
dns_min_replicas: 1
|
||||
|
||||
# Needed to upgrade from 1.16 to 1.17, otherwise upgrade is partial and bug followed
|
||||
upgrade_cluster_setup: true
|
||||
upgrade_cluster_setup: true
|
||||
|
|
|
@ -6,4 +6,4 @@ mode: default
|
|||
# Kubespray settings
|
||||
kube_network_plugin: kube-ovn
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
dns_min_replicas: 1
|
||||
|
|
|
@ -14,4 +14,4 @@ helm_version: v3.1.0
|
|||
|
||||
# https://gitlab.com/miouge/kubespray-ci/-/blob/a4fd5ed6857807f1c353cb60848aedebaf7d2c94/manifests/http-proxy.yml#L42
|
||||
http_proxy: http://172.30.30.30:8888
|
||||
https_proxy: http://172.30.30.30:8888
|
||||
https_proxy: http://172.30.30.30:8888
|
||||
|
|
|
@ -4,4 +4,4 @@ deploy_netchecker: true
|
|||
sonobuoy_enabled: true
|
||||
|
||||
# Ignore ping errors
|
||||
ignore_assert_errors: true
|
||||
ignore_assert_errors: true
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
kube_network_plugin: flannel
|
||||
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
dns_min_replicas: 1
|
||||
|
|
|
@ -4,4 +4,4 @@
|
|||
kube_network_plugin: flannel
|
||||
|
||||
deploy_netchecker: true
|
||||
dns_min_replicas: 1
|
||||
dns_min_replicas: 1
|
||||
|
|
|
@ -17,4 +17,4 @@
|
|||
that:
|
||||
- apiserver_response.json.gitVersion == kube_version
|
||||
fail_msg: "apiserver version different than expected {{ kube_version }}"
|
||||
when: kube_version is defined
|
||||
when: kube_version is defined
|
||||
|
|
|
@ -30,4 +30,4 @@
|
|||
# Check that all nodes are Status=Ready
|
||||
- '(get_nodes_yaml.stdout | from_yaml)["items"] | map(attribute = "status.conditions") | map("items2dict", key_name="type", value_name="status") | map(attribute="Ready") | list | min'
|
||||
retries: 30
|
||||
delay: 10
|
||||
delay: 10
|
||||
|
|
|
@ -51,13 +51,13 @@
|
|||
|
||||
- name: Get hostnet pods
|
||||
command: "{{ bin_dir }}/kubectl get pods -n test -o
|
||||
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
|
||||
jsonpath='{range .items[?(.spec.hostNetwork)]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
|
||||
register: hostnet_pods
|
||||
no_log: true
|
||||
|
||||
- name: Get running pods
|
||||
command: "{{ bin_dir }}/kubectl get pods -n test -o
|
||||
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
|
||||
jsonpath='{range .items[?(.status.phase==\"Running\")]}{.metadata.name} {.status.podIP} {.status.containerStatuses} {end}'"
|
||||
register: running_pods
|
||||
no_log: true
|
||||
|
||||
|
|