Adding yamllint to CI steps (#1556)
* Add a yamllint job to the CI checks
* Minor linting fixes flagged by yamllint
* Change CI to install Python packages from requirements.txt
  - add a secondary requirements.txt for tests
  - move yamllint into the tests requirements
Parent: ecb6dc3679
Commit: 8b151d12b9
106 changed files with 301 additions and 274 deletions
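Reassembled from the CI hunks below (a sketch rather than the verbatim CI file; the .gitlab-ci.yml file name, the indentation, and the surrounding lines are assumptions restored by hand), the change boils down to two pieces: before_script installs everything through the new tests/requirements.txt, and a dedicated job lints the roles tree.

# Sketch only: pieced together from the diff hunks below, not copied verbatim
before_script:
  - pip install -r tests/requirements.txt   # replaces the per-package pip install lines
  - mkdir -p /.ssh

yamllint:
  <<: *job                 # shared job anchor already defined in the CI config
  stage: unit-tests
  script:
    - yamllint roles       # picks up the new .yamllint config from the repo root
  except: ['triggers', 'master']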
@@ -18,10 +18,7 @@ variables:
   # us-west1-a

 before_script:
-  - pip install ansible==2.3.0
-  - pip install netaddr
-  - pip install apache-libcloud==0.20.1
-  - pip install boto==2.9.0
+  - pip install -r tests/requirements.txt
   - mkdir -p /.ssh
   - cp tests/ansible.cfg .

@@ -75,10 +72,7 @@ before_script:
     - $HOME/.cache
 before_script:
   - docker info
-  - pip install ansible==2.3.0
-  - pip install netaddr
-  - pip install apache-libcloud==0.20.1
-  - pip install boto==2.9.0
+  - pip install -r tests/requirements.txt
   - mkdir -p /.ssh
   - mkdir -p $HOME/.ssh
   - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa

@@ -642,6 +636,13 @@ syntax-check:
     - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
   except: ['triggers', 'master']

+yamllint:
+  <<: *job
+  stage: unit-tests
+  script:
+    - yamllint roles
+  except: ['triggers', 'master']
+
 tox-inventory-builder:
   stage: unit-tests
   <<: *job
.yamllint (new file, 16 lines)
@@ -0,0 +1,16 @@
+---
+extends: default
+
+rules:
+  braces:
+    min-spaces-inside: 0
+    max-spaces-inside: 1
+  brackets:
+    min-spaces-inside: 0
+    max-spaces-inside: 1
+  indentation:
+    spaces: 2
+    indent-sequences: consistent
+  line-length: disable
+  new-line-at-end-of-file: disable
+  truthy: disable
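For reference, a hypothetical snippet (not part of the commit) that would pass under the config above: braces and brackets may carry zero or one space inside, indentation is two spaces with sequence indentation only required to stay consistent per file, and long lines, missing final newlines, and bare yes/no truthy values are not flagged.

# hypothetical-example.yml: assumed file and keys, shown only to illustrate the rules
---
docker_options: {storage_driver: overlay2}   # braces: 0-1 spaces inside
etcd_hosts: [ node1, node2 ]                 # brackets: 0-1 spaces inside
dependencies:
  - role: docker                             # 2-space indentation; indented sequences
  - role: etcd                               # kept consistent within the file
restart_network: yes                         # truthy check disabled, so 'yes' is fine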
@@ -49,4 +49,3 @@
   pip:
     name: "{{ item }}"
   with_items: "{{pip_python_modules}}"
-

@@ -27,4 +27,3 @@
   hostname:
     name: "{{inventory_hostname}}"
   when: ansible_hostname == 'localhost'
-

@@ -6,4 +6,3 @@
     regexp: '^\w+\s+requiretty'
     dest: /etc/sudoers
     state: absent
-

@@ -86,4 +86,3 @@
     port: 53
     timeout: 180
   when: inventory_hostname == groups['kube-node'][0] and groups['kube-node'][0] in ansible_play_hosts
-
@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -47,4 +48,3 @@ spec:
         - --default-params={"linear":{"nodesPerReplica":{{ dnsmasq_nodes_per_replica }},"preventSinglePointFailure":true}}
         - --logtostderr=true
         - --v={{ kube_log_level }}
-

@@ -35,7 +35,6 @@ spec:
capabilities:
  add:
    - NET_ADMIN
imagePullPolicy: IfNotPresent
resources:
  limits:
    cpu: {{ dns_cpu_limit }}

@@ -64,4 +63,3 @@ spec:
       hostPath:
         path: /etc/dnsmasq.d-available
   dnsPolicy: Default # Don't use cluster DNS.
-
@@ -1,3 +1,4 @@
+---
 docker_version: '1.13'

 docker_package_info:

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '3.10'

 # https://apt.dockerproject.org/repo/dists/debian-wheezy/main/filelist

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # versioning: docker-io itself is pinned at docker 1.5

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # https://docs.docker.com/engine/installation/linux/fedora/#install-from-a-package

@@ -1,3 +1,4 @@
+---
 docker_kernel_min_version: '0'

 # https://yum.dockerproject.org/repo/main/centos/7/Packages/
@@ -43,4 +43,3 @@
     ETCDCTL_API: 3
   retries: 3
   delay: "{{ retry_stagger | random + 3 }}"
-

@@ -30,4 +30,3 @@
 - name: set etcd_secret_changed
   set_fact:
     etcd_secret_changed: true
-

@@ -66,4 +66,3 @@
       {%- set _ = certs.update({'sync': True}) -%}
       {% endif %}
       {{ certs.sync }}
-
@@ -76,8 +76,7 @@
       'admin-{{ inventory_hostname }}.pem',
       'admin-{{ inventory_hostname }}-key.pem',
       'member-{{ inventory_hostname }}.pem',
-      'member-{{ inventory_hostname }}-key.pem'
-      ]
+      'member-{{ inventory_hostname }}-key.pem']
   all_node_certs: "['ca.pem',
       {% for node in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
       'node-{{ node }}.pem',
@@ -7,7 +7,6 @@
when: inventory_hostname in etcd_node_cert_hosts
tags: etcd-secrets

- name: gen_certs_vault | Read in the local credentials
  command: cat /etc/vault/roles/etcd/userpass
  register: etcd_vault_creds_cat

@@ -96,5 +95,3 @@
   with_items: "{{ etcd_node_certs_needed|d([]) }}"
   when: inventory_hostname in etcd_node_cert_hosts
   notify: set etcd_secret_changed
-
-
@@ -1,3 +1,4 @@
+---
 - name: "Pre-upgrade | check for etcd-proxy unit file"
   stat:
     path: /etc/systemd/system/etcd-proxy.service
@@ -1,7 +1,7 @@
 ---
 - name: Refresh config | Create etcd config file
   template:
-    src: etcd.env.yml
+    src: etcd.env.j2
     dest: /etc/etcd.env
   notify: restart etcd
   when: is_etcd_master
@@ -1,5 +1,4 @@
 ---
-
 elrepo_key_url: 'https://www.elrepo.org/RPM-GPG-KEY-elrepo.org'
 elrepo_rpm: elrepo-release-7.0-3.el7.elrepo.noarch.rpm
 elrepo_mirror: http://www.elrepo.org
@@ -1,3 +1,4 @@
+---
 # Versions
 kubedns_version: 1.14.2
 kubednsautoscaler_version: 1.1.1
@@ -14,12 +14,12 @@
     dest: "{{kube_config_dir}}/{{item.file}}"
   with_items:
     - {name: kubedns, file: kubedns-sa.yml, type: sa}
-    - {name: kubedns, file: kubedns-deploy.yml, type: deployment}
+    - {name: kubedns, file: kubedns-deploy.yml.j2, type: deployment}
     - {name: kubedns, file: kubedns-svc.yml, type: svc}
     - {name: kubedns-autoscaler, file: kubedns-autoscaler-sa.yml, type: sa}
     - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrole.yml, type: clusterrole}
     - {name: kubedns-autoscaler, file: kubedns-autoscaler-clusterrolebinding.yml, type: clusterrolebinding}
-    - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml, type: deployment}
+    - {name: kubedns-autoscaler, file: kubedns-autoscaler.yml.j2, type: deployment}
   register: manifests
   when:
     - dns_mode != 'none' and inventory_hostname == groups['kube-master'][0]
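These renames (kubedns-deploy.yml to kubedns-deploy.yml.j2 here, etcd.env.yml to etcd.env.j2 earlier) presumably keep Jinja templates out of the yamllint roles run, since they are not valid YAML before rendering. An alternative the commit does not use would be an ignore list in .yamllint; a hypothetical sketch, with an assumed glob pattern:

# Hypothetical alternative only: the commit renames templates to .j2 instead
---
extends: default

ignore: |
  **/templates/*.yml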
@@ -1,3 +1,4 @@
+---
 - name: Kubernetes Apps | Lay Down Netchecker Template
   template:
     src: "{{item.file}}"
@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors. All rights reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors. All rights reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors. All rights reserved
 #
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -1,3 +1,4 @@
+---
 # Copyright 2016 The Kubernetes Authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,3 +1,4 @@
+---
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:

@@ -1,3 +1,4 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:

@@ -1,3 +1,4 @@
+---
 apiVersion: v1
 kind: Service
 metadata:

@@ -19,4 +20,3 @@ spec:
     - name: dns-tcp
       port: 53
       protocol: TCP
-
@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: download
     file: "{{ downloads.elasticsearch }}"

@@ -38,4 +38,3 @@
   command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/elasticsearch-service.yaml -n {{ system_namespace }}"
   run_once: true
   when: es_service_manifest.changed
-

@@ -1,3 +1,4 @@
+---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:

@@ -1,3 +1,4 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: download
     file: "{{ downloads.fluentd }}"

@@ -20,4 +20,3 @@
   command: "{{ bin_dir }}/kubectl apply -f {{ kube_config_dir }}/fluentd-ds.yaml -n {{ system_namespace }}"
   run_once: true
   when: fluentd_ds_manifest.changed
-

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: download
     file: "{{ downloads.kibana }}"

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: kubernetes-apps/efk/elasticsearch
   - role: kubernetes-apps/efk/fluentd
@@ -1,3 +1,4 @@
+---
 helm_enabled: false

 # specify a dir and attach it to helm for HELM_HOME.

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: download
     file: "{{ downloads.helm }}"

@@ -1,3 +1,4 @@
+---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:

@@ -1,3 +1,4 @@
+---
 apiVersion: v1
 kind: ServiceAccount
 metadata:

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: download
     file: "{{ downloads.netcheck_server }}"
@@ -1,3 +1,4 @@
+---
 - name: Create canal ConfigMap
   run_once: true
   kube:

@@ -29,4 +30,3 @@
     namespace: "{{system_namespace}}"
     state: "{{ item | ternary('latest','present') }}"
   with_items: "{{ canal_node_manifest.changed }}"
-
@@ -1,3 +1,4 @@
+---
 # FIXME: remove if kubernetes/features#124 is implemented
 - name: Weave | Purge old weave daemonset
   kube:

@@ -9,7 +10,6 @@
state: absent
when: inventory_hostname == groups['kube-master'][0] and weave_manifest.changed

- name: Weave | Start Resources
  kube:
    name: "weave-net"

@@ -21,7 +21,6 @@
with_items: "{{ weave_manifest.changed }}"
when: inventory_hostname == groups['kube-master'][0]

- name: "Weave | wait for weave to become available"
  uri:
    url: http://127.0.0.1:6784/status
@@ -1,3 +1,4 @@
+---
 # Limits for calico apps
 calico_policy_controller_cpu_limit: 100m
 calico_policy_controller_memory_limit: 256M

@@ -1,3 +1,4 @@
+---
 - set_fact:
     calico_cert_dir: "{{ canal_cert_dir }}"
   when: kube_network_plugin == 'canal'

@@ -1,3 +1,4 @@
+---
 # An experimental dev/test only dynamic volumes provisioner,
 # for PetSets. Works for kube>=v1.3 only.
 kube_hostpath_dynamic_provisioner: "false"
@@ -88,4 +88,3 @@

 - include: post-upgrade.yml
   tags: k8s-post-upgrade
-
@@ -1,3 +1,4 @@
+---
 # Valid options: docker (default), rkt, or host
 kubelet_deployment_type: host

@@ -21,4 +21,3 @@
     dest: "/etc/systemd/system/kubelet.service"
     backup: "yes"
   notify: restart kubelet
-

@@ -30,4 +30,3 @@
     dest: /etc/systemd/system/kubelet.service.d/http-proxy.conf
   when: http_proxy is defined or https_proxy is defined or no_proxy is defined
   notify: restart kubelet
-
@@ -1,3 +1,4 @@
+---
 - name: Preinstall | restart network
   command: /bin/true
   notify:

@@ -48,5 +48,3 @@
   fail:
     msg: "azure_route_table_name is missing"
   when: azure_route_table_name is not defined or azure_route_table_name == ""
-
-
@@ -1,3 +1,4 @@
+---
 - name: check vsphere environment variables
   fail:
     msg: "{{ item.name }} is missing"
@@ -1,3 +1,4 @@
+---
 required_pkgs:
   - libselinux-python
   - device-mapper-libs

@@ -1,3 +1,4 @@
+---
 required_pkgs:
   - python-apt
   - aufs-tools

@@ -1,3 +1,4 @@
+---
 required_pkgs:
   - libselinux-python
   - device-mapper-libs

@@ -1,3 +1,4 @@
+---
 required_pkgs:
   - libselinux-python
   - device-mapper-libs
@@ -105,4 +105,3 @@
       {%- set _ = certs.update({'sync': True}) -%}
       {% endif %}
       {{ certs.sync }}
-
@@ -74,8 +74,7 @@
       'kube-scheduler.pem',
       'kube-scheduler-key.pem',
       'kube-controller-manager.pem',
-      'kube-controller-manager-key.pem',
-      ]
+      'kube-controller-manager-key.pem']
   all_node_certs: "['ca.pem',
       {% for node in groups['k8s-cluster'] %}
       'node-{{ node }}.pem',

@@ -87,8 +86,7 @@
       'node-{{ inventory_hostname }}.pem',
       'node-{{ inventory_hostname }}-key.pem',
       'kube-proxy-{{ inventory_hostname }}.pem',
-      'kube-proxy-{{ inventory_hostname }}-key.pem',
-      ]
+      'kube-proxy-{{ inventory_hostname }}-key.pem']
   tags: facts

 - name: Gen_certs | Gather master certs

@@ -195,4 +193,3 @@
 - name: Gen_certs | update ca-certificates (RedHat)
   command: update-ca-trust extract
   when: kube_ca_cert.changed and ansible_os_family == "RedHat"
-
@@ -1,3 +1,4 @@
+---
 ## Required for bootstrap-os/preinstall/download roles and setting facts
 # Valid bootstrap options (required): ubuntu, coreos, centos, none
 bootstrap_os: none

@@ -88,8 +89,10 @@ kube_network_node_prefix: 24

 # The port the API Server will be listening on.
 kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
-kube_apiserver_port: 6443 # (https)
-kube_apiserver_insecure_port: 8080 # (http)
+# https
+kube_apiserver_port: 6443
+# http
+kube_apiserver_insecure_port: 8080

 # Path used to store Docker data
 docker_daemon_graph: "/var/lib/docker"
@@ -1,3 +1,4 @@
+---
 - name: Configure defaults
   debug:
     msg: "Check roles/kubespray-defaults/defaults/main.yml"

@@ -1,3 +1,4 @@
+---
 dependencies:
   - role: etcd
   - role: docker
@@ -1,3 +1,4 @@
+---
 # The interface used by canal for host <-> host communication.
 # If left blank, then the interface is chosing using the node's
 # default route.

@@ -30,4 +31,3 @@ calicoctl_memory_limit: 170M
 calicoctl_cpu_limit: 100m
 calicoctl_memory_requests: 32M
 calicoctl_cpu_requests: 25m
-

@@ -14,4 +14,3 @@
     owner: kube
     recurse: true
     mode: "u=rwX,g-rwx,o-rwx"
-
@@ -1,3 +1,4 @@
+---
 - name: Weave pre-upgrade | Stop legacy weave
   command: weave stop
   failed_when: false
@@ -1,7 +1,5 @@
 ---
-
 - name: Uncordon node
   command: "{{ bin_dir }}/kubectl uncordon {{ inventory_hostname }}"
   delegate_to: "{{ groups['kube-master'][0] }}"
   when: (needs_cordoning|default(false)) and ( {%- if inventory_hostname in groups['kube-node'] -%} true {%- else -%} false {%- endif -%} )
-

@@ -1,3 +1,3 @@
---
drain_grace_period: 90
drain_timeout: 120s
@@ -1,5 +1,4 @@
 ---
-
 - include: ../shared/create_role.yml
   vars:
     create_role_name: "{{ item.name }}"

@@ -1,5 +1,4 @@
 ---
-
 - name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running
   shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi
   register: vault_temp_stop_check
@@ -1,5 +1,4 @@
 ---
-
 - include: ../shared/sync_file.yml
   vars:
     sync_file: "ca.pem"

@@ -29,4 +28,3 @@
 - name: bootstrap/sync_vault_certs | Unset sync_file_results after api.pem sync
   set_fact:
     sync_file_results: []
-
@@ -1,5 +1,4 @@
 ---
-
 - include: ../shared/check_vault.yml
   when: inventory_hostname in groups.vault
@@ -1,5 +1,4 @@
 ---
-
 - name: shared/auth_backend | Test if the auth backend exists
   uri:
     url: "{{ vault_leader_url }}/v1/sys/auth/{{ auth_backend_path }}/tune"

@@ -1,5 +1,4 @@
 ---
-
 # Stop temporary Vault if it's running (can linger if playbook fails out)
 - name: stop vault-temp container
   shell: docker stop {{ vault_temp_container_name }} || rkt stop {{ vault_temp_container_name }}
tests/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
+-r ../requirements.txt
+yamllint
+apache-libcloud==0.20.1
+boto==2.9.0
+tox