Remove vault (#7400)

* Remove contrib/vault

  This is marked as broken since 2018 (3dcb914607).
  It still references apiserver.pem, which has not been used since ddffdb63bf.

  Signed-off-by: Etienne Champetier <e.champetier@ateme.com>

* Finish nuking vault from the codebase

  Signed-off-by: Etienne Champetier <e.champetier@ateme.com>

parent 8655b92e93
commit f0cdf71ccb

59 changed files with 7 additions and 2032 deletions
@@ -48,7 +48,7 @@ As a consequence, `ansible-playbook` command will fail with:
 ERROR! no action detected in task. This often indicates a misspelled module name, or incorrect module path.
 ```
 
-probably pointing on a task depending on a module present in requirements.txt (i.e. "unseal vault").
+probably pointing on a task depending on a module present in requirements.txt.
 
 One way of solving this would be to uninstall the Ansible package and then, to install it via pip but it is not always possible.
 A workaround consists of setting `ANSIBLE_LIBRARY` and `ANSIBLE_MODULE_UTILS` environment variables respectively to the `ansible/modules` and `ansible/module_utils` subdirectories of pip packages installation location, which can be found in the Location field of the output of `pip show [package]` before executing `ansible-playbook`.
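For illustration only (not part of the diff): a minimal shell sketch of the workaround described in the documentation hunk above, assuming Ansible was installed with pip. Parsing the `Location:` field with awk and the inventory/playbook paths are placeholders, not part of the original change.

```bash
# Find where pip installed the ansible package (Location field of `pip show`).
ANSIBLE_LOCATION="$(pip show ansible | awk '/^Location:/ {print $2}')"

# Point Ansible at the pip-installed modules and module_utils directories.
export ANSIBLE_LIBRARY="${ANSIBLE_LOCATION}/ansible/modules"
export ANSIBLE_MODULE_UTILS="${ANSIBLE_LOCATION}/ansible/module_utils"

# Then run the playbook as usual (paths here are placeholders).
ansible-playbook -i inventory/mycluster/hosts.yaml cluster.yml
```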
@@ -245,7 +245,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
 
   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -292,7 +292,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
 
   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -337,7 +337,7 @@ resource "openstack_compute_instance_v2" "etcd" {
 
   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,vault,no-floating"
+    kubespray_groups = "etcd,no-floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -379,7 +379,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
 
   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "etcd,kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }

@@ -421,7 +421,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
 
   metadata = {
     ssh_user = var.ssh_user
-    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
+    kubespray_groups = "kube_control_plane,${var.supplementary_master_groups},k8s-cluster,no-floating"
     depends_on = var.network_id
     use_access_ip = var.use_access_ip
   }
@@ -1,32 +0,0 @@
---
vault_deployment_type: docker
vault_binary_checksum: 3c4d70ba71619a43229e65c67830e30e050eab7a81ac6b28325ff707e5914188
vault_version: 0.10.1
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
vault_downloads:
  vault:
    enabled: "{{ cert_management == 'vault' }}"
    container: "{{ vault_deployment_type != 'host' }}"
    file: "{{ vault_deployment_type == 'host' }}"
    dest: "{{local_release_dir}}/vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
    mode: "0755"
    owner: "vault"
    repo: "{{ vault_image_repo }}"
    sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
    tag: "{{ vault_image_tag }}"
    unarchive: true
    url: "{{ vault_download_url }}"
    version: "{{ vault_version }}"
    groups:
      - vault

# Vault data dirs.
vault_base_dir: /etc/vault
vault_cert_dir: "{{ vault_base_dir }}/ssl"
vault_config_dir: "{{ vault_base_dir }}/config"
vault_roles_dir: "{{ vault_base_dir }}/roles"
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
kube_vault_mount_path: "/kube"
etcd_vault_mount_path: "/etcd"
@@ -1 +0,0 @@
ansible-modules-hashivault>=3.9.4
@@ -1,73 +0,0 @@
---
- include_tasks: sync_etcd_master_certs.yml
  when: inventory_hostname in groups.etcd
  tags:
    - etcd-secrets

- include_tasks: sync_etcd_node_certs.yml
  when: inventory_hostname in etcd_node_cert_hosts
  tags:
    - etcd-secrets

# Issue master certs to Etcd nodes
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "etcd:master:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
    issue_cert_alt_names: "{{ groups['etcd'] + ['localhost'] + (etcd_cert_alt_names)|default() }}"
    issue_cert_copy_ca: "{{ item == etcd_master_certs_needed|first }}"
    issue_cert_file_group: "{{ etcd_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ groups.etcd }}"
    issue_cert_ip_sans: >-
      [
      {%- for host in groups.etcd -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      {%- for cert_alt_ip in etcd_cert_alt_ips -%}
      "{{ cert_alt_ip }}",
      {%- endfor -%}
      "127.0.0.1","::1"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: etcd
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_master_certs_needed|d([]) }}"
  when: inventory_hostname in groups.etcd
  notify: set etcd_secret_changed

# Issue node certs to everyone else
- include_tasks: ../../vault/tasks/shared/issue_cert.yml
  vars:
    issue_cert_common_name: "etcd:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] }}"
    issue_cert_alt_names: "{{ etcd_node_cert_hosts }}"
    issue_cert_copy_ca: "{{ item == etcd_node_certs_needed|first }}"
    issue_cert_file_group: "{{ etcd_cert_group }}"
    issue_cert_file_owner: kube
    issue_cert_hosts: "{{ etcd_node_cert_hosts }}"
    issue_cert_ip_sans: >-
      [
      {%- for host in etcd_node_cert_hosts -%}
      "{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
      {%- if hostvars[host]['ip'] is defined -%}
      "{{ hostvars[host]['ip'] }}",
      {%- endif -%}
      {%- endfor -%}
      "127.0.0.1","::1"
      ]
    issue_cert_path: "{{ item }}"
    issue_cert_role: etcd
    issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
    issue_cert_mount_path: "{{ etcd_vault_mount_path }}"
  with_items: "{{ etcd_node_certs_needed|d([]) }}"
  when: inventory_hostname in etcd_node_cert_hosts
  notify: set etcd_secret_changed

- name: gen_certs_vault | ensure file permissions
  shell: >-
    find {{etcd_cert_dir }} -type d -exec chmod 0755 {} \; &&
    find {{etcd_cert_dir }} -type f -exec chmod 0640 {} \;
  changed_when: false
@@ -1,39 +0,0 @@
---

- name: sync_etcd_master_certs | Create list of master certs needing creation
  set_fact:
    etcd_master_cert_list: >-
      {{ etcd_master_cert_list|default([]) + [
      "admin-" + inventory_hostname + ".pem",
      "member-" + inventory_hostname + ".pem"
      ] }}

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_owner: kube
    sync_file_group: root
    sync_file_is_cert: true
  with_items: "{{ etcd_master_cert_list|d([]) }}"

- name: sync_etcd_certs | Set facts for etcd sync_file results
  set_fact:
    etcd_master_certs_needed: "{{ etcd_master_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_etcd_certs | Unset sync_file_results after etcd certs sync
  set_fact:
    sync_file_results: []

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]

- name: sync_etcd_certs | Unset sync_file_results after ca.pem sync
  set_fact:
    sync_file_results: []
@@ -1,33 +0,0 @@
---

- name: sync_etcd_node_certs | Create list of node certs needing creation
  set_fact:
    etcd_node_cert_list: "{{ etcd_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: "{{ item }}"
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: [ "{{ inventory_hostname }}" ]
    sync_file_is_cert: true
  with_items: "{{ etcd_node_cert_list|d([]) }}"

- name: sync_etcd_node_certs | Set facts for etcd sync_file results
  set_fact:
    etcd_node_certs_needed: "{{ etcd_node_certs_needed|default([]) + [item.path] }}"
  with_items: "{{ sync_file_results|d([]) }}"
  when: item.no_srcs|bool

- name: sync_etcd_node_certs | Unset sync_file_results after etcd node certs
  set_fact:
    sync_file_results: []

- include_tasks: ../../vault/tasks/shared/sync_file.yml
  vars:
    sync_file: ca.pem
    sync_file_dir: "{{ etcd_cert_dir }}"
    sync_file_hosts: "{{ groups['etcd'] }}"

- name: sync_etcd_node_certs | Unset sync_file_results after ca.pem
  set_fact:
    sync_file_results: []
@ -1,134 +0,0 @@
|
|||
---
|
||||
- import_tasks: sync_kube_master_certs.yml
|
||||
when: inventory_hostname in groups['kube_control_plane']
|
||||
|
||||
- import_tasks: sync_kube_node_certs.yml
|
||||
when: inventory_hostname in groups['k8s-cluster']
|
||||
|
||||
# Issue admin certs to kube_control_plane hosts
|
||||
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "admin"
|
||||
issue_cert_copy_ca: "{{ item == kube_admin_certs_needed|first }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube_control_plane
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
|
||||
with_items: "{{ kube_admin_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['kube_control_plane']
|
||||
|
||||
- name: gen_certs_vault | Set fact about certificate alt names
|
||||
set_fact:
|
||||
kube_cert_alt_names: >-
|
||||
{{
|
||||
groups['kube_control_plane'] +
|
||||
['kubernetes.default.svc.'+cluster_name, 'kubernetes.default.svc', 'kubernetes.default', 'kubernetes'] +
|
||||
['localhost']
|
||||
}}
|
||||
run_once: true
|
||||
|
||||
- name: gen_certs_vault | Add external load balancer domain name to certificate alt names
|
||||
set_fact:
|
||||
kube_cert_alt_names: "{{ kube_cert_alt_names + [apiserver_loadbalancer_domain_name] }}"
|
||||
when: loadbalancer_apiserver is defined
|
||||
run_once: true
|
||||
|
||||
# Issue master components certs to kube_control_plane hosts
|
||||
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "kubernetes"
|
||||
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
issue_cert_run_once: true
|
||||
issue_cert_ip_sans: >-
|
||||
[
|
||||
{%- for host in groups['kube_control_plane'] -%}
|
||||
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
|
||||
{%- if hostvars[host]['ip'] is defined -%}
|
||||
"{{ hostvars[host]['ip'] }}",
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
{%- if supplementary_addresses_in_ssl_keys is defined -%}
|
||||
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
|
||||
"{{ ip_item }}",
|
||||
{%- endfor -%}
|
||||
{%- endif -%}
|
||||
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
|
||||
]
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube_control_plane
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
|
||||
with_items: "{{ kube_master_components_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['kube_control_plane']
|
||||
notify: set secret_changed
|
||||
|
||||
# Issue node certs to k8s-cluster nodes
|
||||
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
# Need to strip out the 'node-' prefix from the cert name so it can be used
|
||||
# with the node authorization plugin ( CN matches kubelet node name )
|
||||
issue_cert_common_name: "system:node:{{ item.rsplit('/', 1)[1].rsplit('.', 1)[0] | regex_replace('^node-', '') }}"
|
||||
issue_cert_copy_ca: "{{ item == kube_node_certs_needed|first }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube-node
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
|
||||
with_items: "{{ kube_node_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']
|
||||
|
||||
# Issue proxy certs to k8s-cluster nodes
|
||||
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "system:kube-proxy"
|
||||
issue_cert_copy_ca: "{{ item == kube_proxy_certs_needed|first }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_hosts: "{{ groups['k8s-cluster'] }}"
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: kube-proxy
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
|
||||
with_items: "{{ kube_proxy_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['k8s-cluster']
|
||||
|
||||
# Issue front proxy cert to kube_control_plane hosts
|
||||
- include_tasks: ../../../vault/tasks/shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "front-proxy-client"
|
||||
issue_cert_copy_ca: "{{ item == kube_front_proxy_clients_certs_needed|first }}"
|
||||
issue_cert_ca_filename: front-proxy-ca.pem
|
||||
issue_cert_alt_names: "{{ kube_cert_alt_names }}"
|
||||
issue_cert_file_group: "{{ kube_cert_group }}"
|
||||
issue_cert_file_owner: kube
|
||||
issue_cert_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
issue_cert_ip_sans: >-
|
||||
[
|
||||
{%- for host in groups['kube_control_plane'] -%}
|
||||
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
|
||||
{%- if hostvars[host]['ip'] is defined -%}
|
||||
"{{ hostvars[host]['ip'] }}",
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
{%- if supplementary_addresses_in_ssl_keys is defined -%}
|
||||
{%- for ip_item in supplementary_addresses_in_ssl_keys -%}
|
||||
"{{ ip_item }}",
|
||||
{%- endfor -%}
|
||||
{%- endif -%}
|
||||
"127.0.0.1","::1","{{ kube_apiserver_ip }}"
|
||||
]
|
||||
issue_cert_path: "{{ item }}"
|
||||
issue_cert_role: front-proxy-client
|
||||
issue_cert_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
issue_cert_mount_path: "{{ kube_vault_mount_path }}"
|
||||
with_items: "{{ kube_front_proxy_clients_certs_needed|d([]) }}"
|
||||
when: inventory_hostname in groups['kube_control_plane']
|
||||
notify: set secret_changed
|
|
@ -1,89 +0,0 @@
|
|||
---
|
||||
|
||||
- name: sync_kube_master_certs | Create list of needed kube admin certs
|
||||
set_fact:
|
||||
kube_admin_cert_list: "{{ kube_admin_cert_list|d([]) + ['admin-' + inventory_hostname + '.pem'] }}"
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: [ "{{ inventory_hostname }}" ]
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: "{{ kube_admin_cert_list|d([]) }}"
|
||||
|
||||
- name: sync_kube_master_certs | Set facts for kube admin sync_file results
|
||||
set_fact:
|
||||
kube_admin_certs_needed: "{{ kube_admin_certs_needed|default([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after kube admin certs
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: ["apiserver.pem", "kube-scheduler.pem", "kube-controller-manager.pem", "service-account.pem"]
|
||||
|
||||
- name: sync_kube_master_certs | Set facts for kube master components sync_file results
|
||||
set_fact:
|
||||
kube_master_components_certs_needed: "{{ kube_master_components_certs_needed|d([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after kube master components cert
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: front-proxy-ca.pem
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
sync_file_owner: kube
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-ca.pem
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: ["front-proxy-client.pem"]
|
||||
|
||||
- name: sync_kube_master_certs | Set facts for front-proxy-client certs sync_file results
|
||||
set_fact:
|
||||
kube_front_proxy_clients_certs_needed: "{{ kube_front_proxy_clients_certs_needed|d([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after front-proxy-client sync
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: ca.pem
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
sync_file_owner: kube
|
||||
|
||||
- name: sync_kube_master_certs | Unset sync_file_results after ca.pem
|
||||
set_fact:
|
||||
sync_file_results: []
|
|
@ -1,60 +0,0 @@
|
|||
---
|
||||
|
||||
- name: sync_kube_node_certs | Create list of needed certs
|
||||
set_fact:
|
||||
kube_node_cert_list: "{{ kube_node_cert_list|default([]) + ['node-' + inventory_hostname + '.pem'] }}"
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: [ "{{ inventory_hostname }}" ]
|
||||
sync_file_is_cert: true
|
||||
sync_file_owner: kube
|
||||
with_items: "{{ kube_node_cert_list|default([]) }}"
|
||||
|
||||
- name: sync_kube_node_certs | Set facts for kube_control_plane sync_file results
|
||||
set_fact:
|
||||
kube_node_certs_needed: "{{ kube_node_certs_needed|default([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_node_certs | Unset sync_file_results after kube node certs
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: ca.pem
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: "{{ groups['k8s-cluster'] }}"
|
||||
sync_file_owner: kube
|
||||
|
||||
- name: sync_kube_node_certs | Unset sync_file_results after ca.pem
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- name: sync_kube_node_certs | Create list of needed kube-proxy certs
|
||||
set_fact:
|
||||
kube_proxy_cert_list: "{{ kube_proxy_cert_list|default([]) + ['kube-proxy-' + inventory_hostname + '.pem'] }}"
|
||||
|
||||
- include_tasks: ../../../vault/tasks/shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ kube_cert_dir }}"
|
||||
sync_file_group: "{{ kube_cert_group }}"
|
||||
sync_file_hosts: [ "{{ inventory_hostname }}" ]
|
||||
sync_file_owner: kube
|
||||
with_items: "{{ kube_proxy_cert_list|default([]) }}"
|
||||
|
||||
- name: sync_kube_node_certs | Set facts for kube-proxy sync_file results
|
||||
set_fact:
|
||||
kube_proxy_certs_needed: "{{ kube_proxy_certs_needed|default([]) + [item.path] }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
when: item.no_srcs|bool
|
||||
|
||||
- name: sync_kube_node_certs | Unset sync_file_results after kube proxy certs
|
||||
set_fact:
|
||||
sync_file_results: []
|
|
@ -1,200 +0,0 @@
|
|||
---
|
||||
vault_bootstrap: false
|
||||
vault_deployment_type: docker
|
||||
|
||||
vault_adduser_vars:
|
||||
comment: "Hashicorp Vault User"
|
||||
createhome: no
|
||||
name: vault
|
||||
shell: /sbin/nologin
|
||||
system: yes
|
||||
|
||||
# These variables are redefined in kubespray-defaults so the shared tasks
# in the etcd and kubernetes/secrets roles can use them
|
||||
vault_base_dir: /etc/vault
|
||||
vault_cert_dir: "{{ vault_base_dir }}/ssl"
|
||||
vault_config_dir: "{{ vault_base_dir }}/config"
|
||||
vault_roles_dir: "{{ vault_base_dir }}/roles"
|
||||
vault_secrets_dir: "{{ vault_base_dir }}/secrets"
|
||||
vault_lib_dir: "/var/lib/vault"
|
||||
vault_log_dir: "/var/log/vault"
|
||||
|
||||
vault_version: 0.10.1
|
||||
vault_binary_checksum: 66f0f1b0b221d664dd5913f8697409d7401df4bb2a19c7277e8fbad152063fae
|
||||
vault_download_url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
|
||||
|
||||
# hvac==0.7.0 is broken at the moment
|
||||
hvac_version: 0.6.4
|
||||
|
||||
# Arch of Docker images and needed packages
|
||||
image_arch: "{{host_architecture}}"
|
||||
|
||||
vault_download_vars:
|
||||
container: "{{ vault_deployment_type != 'host' }}"
|
||||
dest: "vault/vault_{{ vault_version }}_linux_{{ image_arch }}.zip"
|
||||
enabled: true
|
||||
mode: "0755"
|
||||
owner: "vault"
|
||||
repo: "{{ vault_image_repo }}"
|
||||
sha256: "{{ vault_binary_checksum if vault_deployment_type == 'host' else vault_digest_checksum|d(none) }}"
|
||||
source_url: "{{ vault_download_url }}"
|
||||
tag: "{{ vault_image_tag }}"
|
||||
unarchive: true
|
||||
url: "{{ vault_download_url }}"
|
||||
version: "{{ vault_version }}"
|
||||
|
||||
vault_container_name: kube-hashicorp-vault
|
||||
vault_temp_container_name: vault-temp
|
||||
vault_image_repo: "vault"
|
||||
vault_image_tag: "{{ vault_version }}"
|
||||
|
||||
vault_bind_address: 0.0.0.0
|
||||
vault_port: 8200
|
||||
vault_etcd_url: "{{ etcd_access_addresses }}"
|
||||
|
||||
# Default and maximum lease TTLs
|
||||
vault_default_lease_ttl: 70080h
|
||||
vault_max_lease_ttl: 87600h
|
||||
|
||||
vault_temp_config:
|
||||
backend:
|
||||
file:
|
||||
path: /vault/file
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
listener:
|
||||
tcp:
|
||||
address: "{{ vault_bind_address }}:{{ vault_port }}"
|
||||
tls_disable: "true"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
|
||||
vault_config:
|
||||
backend:
|
||||
etcd:
|
||||
address: "{{ vault_etcd_url }}"
|
||||
ha_enabled: "true"
|
||||
redirect_addr: "https://{{ inventory_hostname }}:{{ vault_port }}"
|
||||
tls_ca_file: "{{ etcd_cert_dir }}/ca.pem"
|
||||
tls_cert_file: "{{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem"
|
||||
tls_key_file: "{{ etcd_cert_dir}}/node-{{ inventory_hostname }}-key.pem"
|
||||
cluster_name: "kubernetes-vault"
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
ui: "true"
|
||||
listener:
|
||||
tcp:
|
||||
address: "{{ vault_bind_address }}:{{ vault_port }}"
|
||||
tls_cert_file: "{{ vault_cert_dir }}/api.pem"
|
||||
tls_key_file: "{{ vault_cert_dir }}/api-key.pem"
|
||||
|
||||
vault_secret_shares: 1
|
||||
vault_secret_threshold: 1
|
||||
|
||||
vault_successful_http_codes: ["200", "429", "500", "501", "503"]
|
||||
|
||||
vault_ca_options:
|
||||
vault:
|
||||
common_name: vault
|
||||
format: pem
|
||||
ttl: "{{ vault_max_lease_ttl }}"
|
||||
exclude_cn_from_sans: true
|
||||
alt_names: "vault.kube-system.svc.{{ dns_domain }},vault.kube-system.svc,vault.kube-system,vault"
|
||||
etcd:
|
||||
common_name: etcd
|
||||
format: pem
|
||||
ttl: "{{ vault_max_lease_ttl }}"
|
||||
exclude_cn_from_sans: true
|
||||
kube:
|
||||
common_name: kube
|
||||
format: pem
|
||||
ttl: "{{ vault_max_lease_ttl }}"
|
||||
exclude_cn_from_sans: true
|
||||
|
||||
vault_client_headers:
|
||||
Accept: "application/json"
|
||||
Content-Type: "application/json"
|
||||
|
||||
etcd_cert_dir: /etc/ssl/etcd/ssl
|
||||
kube_cert_dir: /etc/kubernetes/ssl
|
||||
|
||||
vault_pki_mounts:
|
||||
userpass:
|
||||
name: userpass
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
description: "Userpass"
|
||||
cert_dir: "{{ vault_cert_dir }}"
|
||||
roles:
|
||||
- name: userpass
|
||||
group: userpass
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/userpass.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
|
||||
vault:
|
||||
name: vault
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
description: "Vault Root CA"
|
||||
cert_dir: "{{ vault_cert_dir }}"
|
||||
roles:
|
||||
- name: vault
|
||||
group: vault
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/vault.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
etcd:
|
||||
name: etcd
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
description: "Etcd Root CA"
|
||||
cert_dir: "{{ etcd_cert_dir }}"
|
||||
roles:
|
||||
- name: etcd
|
||||
group: etcd
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/etcd.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
enforce_hostnames: false
|
||||
organization: "kube:etcd"
|
||||
kube:
|
||||
name: kube
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
description: "Kubernetes Root CA"
|
||||
cert_dir: "{{ kube_cert_dir }}"
|
||||
roles:
|
||||
- name: kube_control_plane
|
||||
group: kube_control_plane
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/kube_control_plane.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
enforce_hostnames: false
|
||||
organization: "system:masters"
|
||||
- name: front-proxy-client
|
||||
group: kube_control_plane
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
enforce_hostnames: false
|
||||
organization: "system:front-proxy-client"
|
||||
- name: kube-node
|
||||
group: k8s-cluster
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/kube-node.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
enforce_hostnames: false
|
||||
organization: "system:nodes"
|
||||
- name: kube-proxy
|
||||
group: k8s-cluster
|
||||
password: "{{ lookup('password', credentials_dir + '/vault/kube-proxy.creds length=15') }}"
|
||||
policy_rules: default
|
||||
role_options:
|
||||
allow_any_name: true
|
||||
enforce_hostnames: false
|
||||
organization: "system:node-proxier"
|
|
@ -1,50 +0,0 @@
|
|||
---
|
||||
- name: restart vault
|
||||
command: /bin/true
|
||||
notify:
|
||||
- restart vault service
|
||||
- wait for vault up
|
||||
- unseal vault
|
||||
|
||||
- name: wait for vault up
|
||||
uri:
|
||||
url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
|
||||
headers: "{{ vault_client_headers }}"
|
||||
status_code: "{{ vault_successful_http_codes | join(',') }}"
|
||||
register: vault_health_check
|
||||
until: vault_health_check is succeeded
|
||||
retries: 10
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
run_once: yes
|
||||
notify: set facts about local Vault health
|
||||
|
||||
- name: wait for vault up nowait
|
||||
uri:
|
||||
url: "{{ vault_leader_url | default('https://localhost:8200') }}/v1/sys/health"
|
||||
headers: "{{ vault_client_headers }}"
|
||||
status_code: "{{ vault_successful_http_codes | join(',') }}"
|
||||
register: vault_health_check
|
||||
run_once: yes
|
||||
failed_when: false
|
||||
notify: set facts about local Vault health
|
||||
|
||||
- name: set facts about local Vault health
|
||||
set_fact:
|
||||
vault_is_running: "{{ vault_health_check.get('status', '-1') in vault_successful_http_codes }}"
|
||||
vault_cluster_is_initialized: "{{ vault_health_check.get('json', {}).get('initialized', false) }}"
|
||||
vault_is_sealed: "{{ vault_health_check.get('json', {}).get('sealed', true) }}"
|
||||
|
||||
- name: restart vault service
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
enabled: yes
|
||||
name: vault
|
||||
state: restarted
|
||||
|
||||
- name: unseal vault
|
||||
hashivault_unseal:
|
||||
url: "{{ vault_leader_url | default('https://localhost:8200') }}"
|
||||
token: "{{ vault_root_token }}"
|
||||
ca_cert: "{{ vault_cert_dir }}/ca.pem"
|
||||
keys: "{{ item }}"
|
||||
with_items: "{{ vault_unseal_keys|default([]) }}"
|
|
@ -1,4 +0,0 @@
|
|||
---
|
||||
dependencies:
|
||||
- role: adduser
|
||||
user: "{{ vault_adduser_vars }}"
|
|
@ -1,32 +0,0 @@
|
|||
---
|
||||
|
||||
- name: "bootstrap/ca_trust | pull CA from cert from {{groups.vault|first}}"
|
||||
command: "cat {{ vault_cert_dir }}/ca.pem"
|
||||
register: vault_cert_file_cat
|
||||
delegate_to: "{{ groups['vault']|first }}"
|
||||
|
||||
# This part is mostly stolen from the etcd role
|
||||
- name: bootstrap/ca_trust | target ca-certificate store file
|
||||
set_fact:
|
||||
ca_cert_path: >-
|
||||
{% if ansible_os_family == "Debian" -%}
|
||||
/usr/local/share/ca-certificates/vault-ca.crt
|
||||
{%- elif ansible_os_family == "RedHat" -%}
|
||||
/etc/pki/ca-trust/source/anchors/vault-ca.crt
|
||||
{%- elif ansible_os_family in ["Flatcar Container Linux by Kinvolk"] -%}
|
||||
/etc/ssl/certs/vault-ca.pem
|
||||
{%- endif %}
|
||||
|
||||
- name: bootstrap/ca_trust | add CA to trusted CA dir
|
||||
copy:
|
||||
content: "{{ vault_cert_file_cat.get('stdout') }}\n"
|
||||
dest: "{{ ca_cert_path }}"
|
||||
register: vault_ca_cert
|
||||
|
||||
- name: bootstrap/ca_trust | update ca-certificates (Debian/Ubuntu/Flatcar)
|
||||
command: update-ca-certificates
|
||||
when: vault_ca_cert.changed and ansible_os_family in ["Debian", "Flatcar Container Linux by Kinvolk"]
|
||||
|
||||
- name: bootstrap/ca_trust | update ca-certificates (RedHat)
|
||||
command: update-ca-trust extract
|
||||
when: vault_ca_cert.changed and ansible_os_family == "RedHat"
|
|
@ -1,13 +0,0 @@
|
|||
---
|
||||
- include_tasks: ../shared/create_mount.yml
|
||||
vars:
|
||||
create_mount_path: "/{{ item.name }}"
|
||||
create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}"
|
||||
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
|
||||
create_mount_description: "{{ item.description }}"
|
||||
create_mount_cert_dir: "{{ item.cert_dir }}"
|
||||
create_mount_config_ca_needed: "{{ item.config_ca }}"
|
||||
with_items:
|
||||
- "{{ vault_pki_mounts.userpass|combine({'config_ca': not vault_ca_cert_needed}) }}"
|
||||
- "{{ vault_pki_mounts.vault|combine({'config_ca': not vault_ca_cert_needed}) }}"
|
||||
- "{{ vault_pki_mounts.etcd|combine({'config_ca': not vault_etcd_ca_cert_needed}) }}"
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
- include_tasks: ../shared/create_role.yml
|
||||
vars:
|
||||
create_role_name: "{{ item.name }}"
|
||||
create_role_group: "{{ item.group }}"
|
||||
create_role_policy_rules: "{{ item.policy_rules }}"
|
||||
create_role_password: "{{ item.password }}"
|
||||
create_role_options: "{{ item.role_options }}"
|
||||
create_role_mount_path: "/{{ mount.name }}"
|
||||
with_items: "{{ mount.roles }}"
|
|
@ -1,21 +0,0 @@
|
|||
---
|
||||
- import_tasks: ../shared/issue_cert.yml
|
||||
vars:
|
||||
issue_cert_common_name: "{{ vault_pki_mounts.vault.roles[0].name }}"
|
||||
issue_cert_alt_names: "{{ groups['vault'] + ['localhost'] + (vault_ca_options['vault']['alt_names'].split(','))|default() }}"
|
||||
issue_cert_hosts: "{{ groups['vault'] }}"
|
||||
issue_cert_ip_sans: >-
|
||||
[
|
||||
{%- for host in groups['vault'] -%}
|
||||
"{{ hostvars[host]['ansible_default_ipv4']['address'] }}",
|
||||
{%- if hostvars[host]['ip'] is defined -%}
|
||||
"{{ hostvars[host]['ip'] }}",
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
"127.0.0.1","::1"
|
||||
]
|
||||
issue_cert_mount_path: "/{{ vault_pki_mounts.vault.name }}"
|
||||
issue_cert_path: "{{ vault_cert_dir }}/api.pem"
|
||||
issue_cert_role: "{{ vault_pki_mounts.vault.roles[0].name }}"
|
||||
issue_cert_url: "{{ vault_leader_url }}"
|
||||
when: vault_api_cert_needed
|
|
@ -1,72 +0,0 @@
|
|||
---
|
||||
- import_tasks: ../shared/check_etcd.yml
|
||||
vars:
|
||||
vault_etcd_needed: no
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: ../shared/check_vault.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: sync_secrets.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: ../shared/find_leader.yml
|
||||
when: inventory_hostname in groups.vault and vault_cluster_is_initialized
|
||||
|
||||
- import_tasks: sync_vault_certs.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: sync_etcd_certs.yml
|
||||
when: inventory_hostname in groups.etcd
|
||||
|
||||
- import_tasks: start_vault_temp.yml
|
||||
when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
|
||||
|
||||
- name: vault | Set fact about vault leader url
|
||||
set_fact:
|
||||
vault_leader_url: "{{ hostvars[groups.vault|first]['vault_leader_url'] }}"
|
||||
when: not vault_cluster_is_initialized
|
||||
|
||||
- import_tasks: create_mounts.yml
|
||||
when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
|
||||
|
||||
- include_tasks: ../shared/auth_backend.yml
|
||||
vars:
|
||||
auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates
|
||||
auth_backend_path: userpass
|
||||
auth_backend_type: userpass
|
||||
when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
|
||||
|
||||
- include_tasks: create_roles.yml
|
||||
with_items:
|
||||
- "{{ vault_pki_mounts.vault }}"
|
||||
- "{{ vault_pki_mounts.etcd }}"
|
||||
loop_control:
|
||||
loop_var: mount
|
||||
when: inventory_hostname == groups.vault|first and not vault_cluster_is_initialized
|
||||
|
||||
- include_tasks: ../shared/gen_ca.yml
|
||||
vars:
|
||||
gen_ca_cert_dir: "{{ vault_pki_mounts.vault.cert_dir }}"
|
||||
gen_ca_mount_path: "/{{ vault_pki_mounts.vault.name }}"
|
||||
gen_ca_vault_headers: "{{ vault_headers }}"
|
||||
gen_ca_vault_options: "{{ vault_ca_options.vault }}"
|
||||
gen_ca_copy_group: "kube_control_plane"
|
||||
when: >-
|
||||
inventory_hostname in groups.vault
|
||||
and not vault_cluster_is_initialized
|
||||
and vault_ca_cert_needed
|
||||
|
||||
- include_tasks: ../shared/gen_ca.yml
|
||||
vars:
|
||||
gen_ca_cert_dir: "{{ vault_pki_mounts.etcd.cert_dir }}"
|
||||
gen_ca_mount_path: "/{{ vault_pki_mounts.etcd.name }}"
|
||||
gen_ca_vault_headers: "{{ vault_headers }}"
|
||||
gen_ca_vault_options: "{{ vault_ca_options.etcd }}"
|
||||
gen_ca_copy_group: "etcd"
|
||||
when: inventory_hostname in groups.etcd and not vault_cluster_is_initialized and vault_etcd_ca_cert_needed
|
||||
|
||||
- import_tasks: gen_vault_certs.yml
|
||||
when: inventory_hostname in groups.vault and not vault_cluster_is_initialized and vault_api_cert_needed
|
||||
|
||||
- import_tasks: ca_trust.yml
|
|
@ -1,42 +0,0 @@
|
|||
---
|
||||
- name: bootstrap/start_vault_temp | Ensure vault-temp isn't already running
|
||||
shell: if docker rm -f {{ vault_temp_container_name }} 2>&1 1>/dev/null;then echo true;else echo false;fi
|
||||
register: vault_temp_stop_check
|
||||
changed_when: "'true' in vault_temp_stop_check.stdout"
|
||||
|
||||
- name: bootstrap/start_vault_temp | Start single node Vault with file backend
|
||||
command: >
|
||||
docker run -d --cap-add=IPC_LOCK --name {{ vault_temp_container_name }}
|
||||
-p {{ vault_port }}:{{ vault_port }}
|
||||
-e 'VAULT_LOCAL_CONFIG={{ vault_temp_config|to_json }}'
|
||||
-v /etc/vault:/etc/vault
|
||||
{{ vault_image_repo }}:{{ vault_version }} server
|
||||
|
||||
- name: bootstrap/start_vault_temp | Start again single node Vault with file backend
|
||||
command: docker start {{ vault_temp_container_name }}
|
||||
|
||||
- name: bootstrap/start_vault_temp | Initialize vault-temp
|
||||
hashivault_init:
|
||||
url: "http://localhost:{{ vault_port }}/"
|
||||
secret_shares: 1
|
||||
secret_threshold: 1
|
||||
until: "vault_temp_init is succeeded"
|
||||
retries: 4
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
register: vault_temp_init
|
||||
|
||||
# NOTE: vault_headers and vault_url are used by subsequent issue calls
|
||||
- name: bootstrap/start_vault_temp | Set needed vault facts
|
||||
set_fact:
|
||||
vault_leader_url: "http://{{ inventory_hostname }}:{{ vault_port }}"
|
||||
vault_temp_unseal_keys: "{{ vault_temp_init.keys_base64 }}"
|
||||
vault_root_token: "{{ vault_temp_init.root_token }}"
|
||||
vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_temp_init.root_token}) }}"
|
||||
|
||||
- name: bootstrap/start_vault_temp | Unseal vault-temp
|
||||
hashivault_unseal:
|
||||
url: "http://localhost:{{ vault_port }}/"
|
||||
token: "{{ vault_root_token }}"
|
||||
keys: "{{ item }}"
|
||||
with_items: "{{ vault_temp_unseal_keys|default([]) }}"
|
||||
no_log: true
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
|
||||
- include_tasks: ../shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "ca.pem"
|
||||
sync_file_dir: "{{ etcd_cert_dir }}"
|
||||
sync_file_hosts: "{{ groups.etcd }}"
|
||||
sync_file_is_cert: true
|
||||
|
||||
- name: bootstrap/sync_etcd_certs | Set facts for etcd sync_file results
|
||||
set_fact:
|
||||
vault_etcd_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"
|
||||
|
||||
- name: bootstrap/sync_etcd_certs | Unset sync_file_results after ca.pem sync
|
||||
set_fact:
|
||||
sync_file_results: []
|
|
@ -1,52 +0,0 @@
|
|||
---
|
||||
|
||||
- include_tasks: ../shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "{{ item }}"
|
||||
sync_file_dir: "{{ vault_secrets_dir }}"
|
||||
sync_file_hosts: "{{ groups.vault }}"
|
||||
with_items:
|
||||
- root_token
|
||||
- unseal_keys
|
||||
|
||||
- name: bootstrap/sync_secrets | Set fact based on sync_file_results
|
||||
set_fact:
|
||||
vault_secrets_available: "{{ vault_secrets_available|default(true) and not item.no_srcs }}"
|
||||
with_items: "{{ sync_file_results|d([]) }}"
|
||||
|
||||
- name: bootstrap/sync_secrets | Reset sync_file_results to avoid variable bleed
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
- name: bootstrap/sync_secrets | Print out warning message if secrets are not available and vault is initialized
|
||||
pause:
|
||||
prompt: >
|
||||
Vault orchestration may not be able to proceed. The Vault cluster is initialized, but
|
||||
'root_token' or 'unseal_keys' were not found in {{ vault_secrets_dir }}. These are
|
||||
needed for many vault orchestration steps.
|
||||
when: vault_cluster_is_initialized and not vault_secrets_available
|
||||
|
||||
- name: bootstrap/sync_secrets | Cat root_token from a vault host
|
||||
command: "cat {{ vault_secrets_dir }}/root_token"
|
||||
register: vault_root_token_cat
|
||||
run_once: yes
|
||||
when: vault_secrets_available
|
||||
|
||||
- name: bootstrap/sync_secrets | Cat unseal_keys from a vault host
|
||||
command: "cat {{ vault_secrets_dir }}/unseal_keys"
|
||||
register: vault_unseal_keys_cat
|
||||
run_once: yes
|
||||
when: vault_secrets_available
|
||||
|
||||
- name: bootstrap/sync_secrets | Set needed facts for Vault API interaction when Vault is already running
|
||||
set_fact:
|
||||
vault_root_token: "{{ vault_root_token_cat.stdout }}"
|
||||
vault_unseal_keys: "{{ vault_unseal_keys_cat.stdout_lines }}"
|
||||
run_once: yes
|
||||
when: vault_secrets_available
|
||||
|
||||
# FIXME: Remove all uri calls
|
||||
- name: bootstrap/sync_secrets | Update vault_headers if we have the root_token
|
||||
set_fact:
|
||||
vault_headers: "{{ vault_client_headers | combine({'X-Vault-Token': vault_root_token}) }}"
|
||||
when: vault_secrets_available
|
|
@ -1,53 +0,0 @@
|
|||
---
|
||||
- include_tasks: ../shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "ca.pem"
|
||||
sync_file_dir: "{{ vault_cert_dir }}"
|
||||
sync_file_hosts: "{{ groups.vault }}"
|
||||
sync_file_owner: vault
|
||||
sync_file_group: root
|
||||
sync_file_is_cert: true
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
|
||||
set_fact:
|
||||
vault_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Unset sync_file_results after ca.pem sync
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
# FIXME: Distribute ca.pem alone in a better way
|
||||
- include_tasks: ../shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "ca.pem"
|
||||
sync_file_dir: "{{ vault_cert_dir }}"
|
||||
sync_file_hosts: "{{ groups['kube_control_plane'] }}"
|
||||
sync_file_owner: vault
|
||||
sync_file_group: root
|
||||
sync_file_is_cert: false
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Set facts for vault sync_file results
|
||||
set_fact:
|
||||
vault_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Unset sync_file_results after ca.pem sync
|
||||
set_fact:
|
||||
sync_file_results: []
|
||||
|
||||
|
||||
- include_tasks: ../shared/sync_file.yml
|
||||
vars:
|
||||
sync_file: "api.pem"
|
||||
sync_file_dir: "{{ vault_cert_dir }}"
|
||||
sync_file_hosts: "{{ groups.vault }}"
|
||||
sync_file_owner: vault
|
||||
sync_file_group: root
|
||||
sync_file_is_cert: true
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Set fact if Vault's API cert is needed
|
||||
set_fact:
|
||||
vault_api_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"
|
||||
|
||||
- name: bootstrap/sync_vault_certs | Unset sync_file_results after api.pem sync
|
||||
set_fact:
|
||||
sync_file_results: []
|
|
@ -1,9 +0,0 @@
|
|||
---
|
||||
|
||||
- name: cluster/binary | Copy vault binary from download dir
|
||||
copy:
|
||||
src: "{{ local_release_dir }}/vault/vault"
|
||||
dest: "{{ bin_dir }}/vault"
|
||||
remote_src: true
|
||||
mode: "0755"
|
||||
owner: vault
|
|
@ -1,23 +0,0 @@
|
|||
---
|
||||
- name: cluster/configure | Ensure the vault directories exist
|
||||
file:
|
||||
dest: "{{ item }}"
|
||||
owner: vault
|
||||
mode: 0750
|
||||
state: directory
|
||||
recurse: true
|
||||
with_items:
|
||||
- "{{ vault_base_dir }}"
|
||||
- "{{ vault_cert_dir }}"
|
||||
- "{{ vault_config_dir }}"
|
||||
- "{{ vault_roles_dir }}"
|
||||
- "{{ vault_secrets_dir }}"
|
||||
- "{{ vault_log_dir }}"
|
||||
- "{{ vault_lib_dir }}"
|
||||
|
||||
- name: cluster/configure | Lay down the configuration file
|
||||
copy:
|
||||
content: "{{ vault_config | to_nice_json(indent=4) }}"
|
||||
dest: "{{ vault_config_dir }}/config.json"
|
||||
mode: 0640
|
||||
register: vault_config_change
|
|
@ -1,13 +0,0 @@
|
|||
---
|
||||
- include_tasks: ../shared/create_mount.yml
|
||||
vars:
|
||||
create_mount_path: "/{{ item.name }}"
|
||||
create_mount_default_lease_ttl: "{{ item.default_lease_ttl }}"
|
||||
create_mount_max_lease_ttl: "{{ item.max_lease_ttl }}"
|
||||
create_mount_description: "{{ item.description }}"
|
||||
create_mount_cert_dir: "{{ item.cert_dir }}"
|
||||
create_mount_config_ca_needed: item.name != vault_pki_mounts.kube.name
|
||||
with_items:
|
||||
- "{{ vault_pki_mounts.vault }}"
|
||||
- "{{ vault_pki_mounts.etcd }}"
|
||||
- "{{ vault_pki_mounts.kube }}"
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
- include_tasks: ../shared/create_role.yml
|
||||
vars:
|
||||
create_role_name: "{{ item.name }}"
|
||||
create_role_group: "{{ item.group }}"
|
||||
create_role_password: "{{ item.password }}"
|
||||
create_role_policy_rules: "{{ item.policy_rules }}"
|
||||
create_role_options: "{{ item.role_options }}"
|
||||
create_role_mount_path: "/{{ mount.name }}"
|
||||
with_items: "{{ mount.roles }}"
|
|
@ -1,43 +0,0 @@
|
|||
---
|
||||
- name: cluster/init | wait for vault
|
||||
command: /bin/true
|
||||
notify: wait for vault up
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: cluster/init | Initialize Vault
|
||||
hashivault_init:
|
||||
url: "https://localhost:{{ vault_port }}/"
|
||||
ca_cert: "{{ vault_cert_dir }}/ca.pem"
|
||||
secret_shares: "{{ vault_secret_shares }}"
|
||||
secret_threshold: "{{ vault_secret_threshold }}"
|
||||
run_once: true
|
||||
register: vault_init_result
|
||||
when: not vault_cluster_is_initialized
|
||||
|
||||
- name: cluster/init | Set facts on the results of the initialization
|
||||
set_fact:
|
||||
vault_unseal_keys: "{{ vault_init_result.keys_base64 }}"
|
||||
vault_root_token: "{{ vault_init_result.root_token }}"
|
||||
vault_headers: "{{ vault_client_headers|combine({'X-Vault-Token': vault_init_result.root_token}) }}"
|
||||
run_once: true
|
||||
when: not vault_cluster_is_initialized
|
||||
|
||||
- name: cluster/init | Ensure all in groups.vault have the unseal_keys locally
|
||||
copy:
|
||||
content: "{{ vault_unseal_keys|join('\n') }}"
|
||||
dest: "{{ vault_secrets_dir }}/unseal_keys"
|
||||
mode: 0640
|
||||
when: not vault_cluster_is_initialized
|
||||
|
||||
- name: cluster/init | Ensure all in groups.vault have the root_token locally
|
||||
copy:
|
||||
content: "{{ vault_root_token }}"
|
||||
dest: "{{ vault_secrets_dir }}/root_token"
|
||||
mode: 0640
|
||||
when: not vault_cluster_is_initialized
|
||||
|
||||
- name: cluster/init | Ensure vault_headers and vault statuses are updated
|
||||
set_fact:
|
||||
vault_cluster_is_initialized: true
|
||||
run_once: true
|
|
@ -1,55 +0,0 @@
|
|||
---
|
||||
- import_tasks: ../shared/check_etcd.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: ../shared/check_vault.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: configure.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: binary.yml
|
||||
when: inventory_hostname in groups.vault and vault_deployment_type == "host"
|
||||
|
||||
- import_tasks: systemd.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: ../shared/find_leader.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: init.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: unseal.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: ../shared/find_leader.yml
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- import_tasks: create_mounts.yml
|
||||
when: inventory_hostname == groups.vault|first
|
||||
|
||||
- include_tasks: ../shared/gen_ca.yml
|
||||
vars:
|
||||
gen_ca_cert_dir: "{{ vault_pki_mounts.kube.cert_dir }}"
|
||||
gen_ca_mount_path: "/{{ vault_pki_mounts.kube.name }}"
|
||||
gen_ca_vault_headers: "{{ vault_headers }}"
|
||||
gen_ca_vault_options: "{{ vault_ca_options.kube }}"
|
||||
gen_ca_copy_group: "kube_control_plane"
|
||||
when: inventory_hostname in groups.vault
|
||||
|
||||
- include_tasks: ../shared/auth_backend.yml
|
||||
vars:
|
||||
auth_backend_description: A Username/Password Auth Backend primarily used for services needing to issue certificates
|
||||
auth_backend_path: userpass
|
||||
auth_backend_type: userpass
|
||||
when: inventory_hostname == groups.vault|first
|
||||
|
||||
- include_tasks: create_roles.yml
|
||||
with_items:
|
||||
- "{{ vault_pki_mounts.vault }}"
|
||||
- "{{ vault_pki_mounts.etcd }}"
|
||||
- "{{ vault_pki_mounts.kube }}"
|
||||
loop_control:
|
||||
loop_var: mount
|
||||
when: inventory_hostname in groups.vault
|
|
@ -1,29 +0,0 @@
|
|||
---
|
||||
- name: cluster/systemd | Copy down vault.service systemd file
|
||||
template:
|
||||
src: "{{ vault_deployment_type }}.service.j2"
|
||||
dest: /etc/systemd/system/vault.service
|
||||
backup: yes
|
||||
register: vault_systemd_placement
|
||||
notify: restart vault
|
||||
|
||||
- name: Create vault service systemd directory
|
||||
file:
|
||||
path: /etc/systemd/system/vault.service.d
|
||||
state: directory
|
||||
|
||||
- name: cluster/systemd | Add vault proxy env vars
|
||||
template:
|
||||
src: "http-proxy.conf.j2"
|
||||
dest: /etc/systemd/system/vault.service.d/http-proxy.conf
|
||||
backup: yes
|
||||
when: http_proxy is defined or https_proxy is defined
|
||||
notify: restart vault
|
||||
|
||||
- name: cluster/systemd | Enable vault.service
|
||||
systemd:
|
||||
daemon_reload: true
|
||||
enabled: yes
|
||||
name: vault
|
||||
state: started
|
||||
notify: wait for vault up
|
|
@ -1,16 +0,0 @@
|
|||
---
|
||||
|
||||
- name: cluster/unseal | Current sealed state
|
||||
debug:
|
||||
msg: "Sealed? {{ vault_is_sealed }}"
|
||||
|
||||
- name: cluster/unseal | Unseal Vault
|
||||
hashivault_unseal:
|
||||
url: "https://localhost:{{ vault_port }}/"
|
||||
token: "{{ vault_root_token }}"
|
||||
ca_cert: "{{ vault_cert_dir }}/ca.pem"
|
||||
keys: "{{ item }}"
|
||||
no_log: true
|
||||
with_items: "{{ vault_unseal_keys|default([]) }}"
|
||||
notify: wait for vault up
|
||||
when: vault_is_sealed
|
|
@ -1,25 +0,0 @@
|
|||
---
|
||||
# The Vault role is typically a two step process:
|
||||
# 1. Bootstrap
|
||||
# This starts a temporary Vault to generate certs for Vault itself. This
|
||||
# includes a Root CA for the cluster, assuming one doesn't exist already.
|
||||
# The temporary instance will remain running after Bootstrap, to provide a
|
||||
# running Vault for the Etcd role to generate certs against.
|
||||
# 2. Cluster
|
||||
# Once Etcd is started, then the Cluster tasks can start up a long-term
|
||||
# Vault cluster using Etcd as the backend. The same Root CA is mounted as
|
||||
# used during step 1, allowing all certs to have the same chain of trust.
|
||||
|
||||
- name: install hvac
|
||||
pip:
|
||||
name: "hvac=={{ hvac_version }}"
|
||||
state: "present"
|
||||
extra_args: "{{ pip_extra_args | default(omit) }}"
|
||||
|
||||
## Bootstrap
|
||||
- include_tasks: bootstrap/main.yml
|
||||
when: cert_management == 'vault' and vault_bootstrap | d()
|
||||
|
||||
## Cluster
|
||||
- include_tasks: cluster/main.yml
|
||||
when: cert_management == 'vault' and not vault_bootstrap | d()
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
- name: shared/auth_backend | Enable auth backend {{ auth_backend_path }}
|
||||
hashivault_auth_enable:
|
||||
url: "{{ vault_leader_url }}"
|
||||
token: "{{ vault_root_token }}"
|
||||
ca_cert: "{{ vault_cert_dir }}/ca.pem"
|
||||
name: "{{ auth_backend_type }}"
|
||||
mount_point: "{{ auth_backend_path }}"
|
||||
description: "{{ auth_backend_description|d('') }}"
|
||||
register: result
|
|
@ -1,19 +0,0 @@
|
|||
---
|
||||
|
||||
- include_tasks: ../shared/pki_mount.yml
|
||||
vars:
|
||||
pki_mount_path: auth-pki
|
||||
pki_mount_options:
|
||||
description: PKI mount to generate certs for the Cert Auth Backend
|
||||
config:
|
||||
default_lease_ttl: "{{ vault_default_lease_ttl }}"
|
||||
max_lease_ttl: "{{ vault_max_lease_ttl }}"
|
||||
|
||||
- name: shared/auth_mount | Create a dummy role for issuing certs from auth-pki
|
||||
hashivault_approle_role_create:
|
||||
url: "{{ vault_leader_url }}"
|
||||
token: "{{ vault_root_token }}"
|
||||
ca_cert: "{{ vault_cert_dir }}/ca.pem"
|
||||
name: "auth-pki/roles/dummy"
|
||||
policies:
|
||||
allow_any_name: true
|
|
@ -1,30 +0,0 @@
|
|||
---
|
||||
|
||||
- name: check_etcd | Check if etcd is up and reachable
|
||||
uri:
|
||||
url: "{{ vault_etcd_url.split(',') | first }}/health"
|
||||
validate_certs: no
|
||||
client_cert: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem"
|
||||
client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem"
|
||||
|
||||
return_content: yes
|
||||
until: vault_etcd_health_check.status == 200 or vault_etcd_health_check.status == 401
|
||||
retries: 3
|
||||
delay: 2
|
||||
delegate_to: "{{ groups['etcd'][0] }}"
|
||||
run_once: true
|
||||
failed_when: false
|
||||
register: vault_etcd_health_check
|
||||
|
||||
- name: check_etcd | Set fact based off the etcd_health_check response
|
||||
set_fact:
|
||||
vault_etcd_available: "{{ vault_etcd_health_check.content }}"
|
||||
- set_fact:
|
||||
vault_etcd_available: "{{ vault_etcd_available.health|d()|bool }}"
|
||||
|
||||
- name: check_etcd | Fail if etcd is not available and needed
|
||||
fail:
|
||||
msg: >
|
||||
Unable to start Vault cluster! Etcd is not available at
|
||||
{{ vault_etcd_url.split(',') | first }} however it is needed by Vault as a backend.
|
||||
when: vault_etcd_needed|d() and not vault_etcd_available
|
|
@ -1,52 +0,0 @@
|
|||
---
|
||||
# Stop temporary Vault if it's running (can linger if playbook fails out)
|
||||
- name: stop vault-temp container
|
||||
shell: docker stop {{ vault_temp_container_name }}
|
||||
failed_when: false
|
||||
register: vault_temp_stop
|
||||
changed_when: vault_temp_stop is succeeded
|
||||
|
||||
# Check if vault is reachable on the localhost
|
||||
- name: check_vault | Attempt to pull local https Vault health
|
||||
command: /bin/true
|
||||
notify:
|
||||
- wait for vault up nowait
|
||||
- set facts about local Vault health
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: check_vault | Set facts about local Vault health
|
||||
set_fact:
|
||||
vault_is_running: "{{ vault_health_check.get('status', '-1') in vault_successful_http_codes }}"
|
||||
|
||||
- name: check_vault | Set facts about local Vault health
|
||||
set_fact:
|
||||
vault_is_initialized: "{{ vault_health_check.get('json', {}).get('initialized', false) }}"
|
||||
vault_is_sealed: "{{ vault_health_check.get('json', {}).get('sealed', true) }}"
|
||||
# vault_in_standby: "{{ vault_health_check.get('json', {}).get('standby', true) }}"
|
||||
# vault_run_version: "{{ vault_local_service_health.get('json', {}).get('version', '') }}"
|
||||
|
||||
- name: check_vault | Check if vault is initialized in etcd when vault is not running
|
||||
command: |-
|
||||
curl \
|
||||
--cacert {{ etcd_cert_dir }}/ca.pem \
|
||||
--cert {{ etcd_cert_dir}}/node-{{ inventory_hostname }}.pem \
|
||||
--key {{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem \
|
||||
-X POST -d '{"key": "{{ "/vault/core/seal-config" | b64encode }}"}' \
|
||||
{{ etcd_access_addresses.split(',') | first }}/v3alpha/kv/range
|
||||
register: vault_etcd_exists
|
||||
retries: 4
|
||||
until: vault_etcd_exists.status == 200
|
||||
delay: "{{ retry_stagger | random + 3 }}"
|
||||
run_once: true
|
||||
when: not vault_is_running and vault_etcd_available
|
||||
changed_when: false
|
||||
|
||||
- name: check_vault | Set fact about the Vault cluster's initialization state
|
||||
set_fact:
|
||||
vault_cluster_is_initialized: >-
|
||||
{{ vault_is_initialized or
|
||||
hostvars[item]['vault_is_initialized'] or
|
||||
('value' in vault_etcd_exists.stdout|default('')) }}
|
||||
with_items: "{{ groups.vault }}"
|
||||
run_once: true
|
|
@@ -1,29 +0,0 @@
---
- name: config_ca | Read root CA cert for Vault
  command: "cat {{ config_ca_ca_pem }}"
  register: vault_ca_cert_cat

- name: config_ca | Pull current CA cert from Vault
  hashivault_read:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    secret: "{{ config_ca_mount_path }}/ca"
    key: "pem"
  register: vault_pull_current_ca
  failed_when: false

- name: config_ca | Read root CA key for Vault
  command: "cat {{ config_ca_ca_key }}"
  register: vault_ca_key_cat
  when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.get("data","").strip()

- name: config_ca | Configure pki mount to use the found root CA cert and key
  hashivault_write:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    secret: "{{ config_ca_mount_path }}/config/ca"
    data:
      pem_bundle: "{{ vault_ca_cert_cat.stdout + '\n' + vault_ca_key_cat.stdout }}"
  when: vault_ca_cert_cat.stdout.strip() != vault_pull_current_ca.get("data","").strip()
@@ -1,16 +0,0 @@
---
- include_tasks: ../shared/pki_mount.yml
  vars:
    pki_mount_path: "{{ create_mount_path }}"
    pki_mount_options:
      config:
        default_lease_ttl: "{{ create_mount_default_lease_ttl }}"
        max_lease_ttl: "{{ create_mount_max_lease_ttl }}"
      description: "{{ create_mount_description }}"

- include_tasks: ../shared/config_ca.yml
  vars:
    config_ca_ca_pem: "{{ create_mount_cert_dir }}/ca.pem"
    config_ca_ca_key: "{{ create_mount_cert_dir }}/ca-key.pem"
    config_ca_mount_path: "{{ create_mount_path }}"
  when: create_mount_config_ca_needed
@@ -1,42 +0,0 @@
---
- name: create_role | Create a policy for the new role
  hashivault_policy_set:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    name: "{{ create_role_name }}"
    rules: >-
      {%- if create_role_policy_rules|d("default") == "default" -%}
        {{
          { 'path': {
              create_role_mount_path + '/issue/' + create_role_name: {'policy': 'write'},
              create_role_mount_path + '/roles/' + create_role_name: {'policy': 'read'}
          }} | to_json + '\n'
        }}
      {%- else -%}
        {{ create_role_policy_rules | to_json + '\n' }}
      {%- endif -%}

- name: create_role | Create {{ create_role_name }} role in the {{ create_role_mount_path }} pki mount
  hashivault_write:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    secret: "{{ create_role_mount_path }}/roles/{{ create_role_name }}"
    data: |
      {%- if create_role_options|d("default") == "default" -%}
      {
        allow_any_name: true
      }
      {%- else -%}
      {{ create_role_options | to_json }}
      {%- endif -%}

## Userpass based auth method

- include_tasks: gen_userpass.yml
  vars:
    gen_userpass_password: "{{ create_role_password }}"
    gen_userpass_policies: "{{ create_role_name }}"
    gen_userpass_role: "{{ create_role_name }}"
    gen_userpass_username: "{{ create_role_name }}"
@@ -1,21 +0,0 @@
---

- name: find_leader | Find the current http Vault leader
  uri:
    url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://localhost:{{ vault_port }}/v1/sys/health"
    headers: "{{ hostvars[groups.vault|first]['vault_headers'] }}"
    method: HEAD
    status_code: 200,429,501,503
  register: vault_leader_check
  until: "vault_leader_check is succeeded"
  retries: 10

- name: find_leader | Set fact for current http leader
  set_fact:
    vault_leader_url: "{{ vault_config.listener.tcp.tls_disable|d()|ternary('http', 'https') }}://{{ inventory_hostname }}:{{ vault_port }}"
  with_items: "{{ groups.vault }}"
  when: "hostvars[item]['vault_leader_check'].get('status') in [200,501,503]"
  # run_once: true

- name: find_leader| show vault_leader_url
  debug: var=vault_leader_url verbosity=2
@@ -1,38 +0,0 @@
---
- name: "bootstrap/gen_ca | Ensure cert_dir {{ gen_ca_cert_dir }} exists on necessary hosts"
  file:
    mode: 0755
    path: "{{ gen_ca_cert_dir }}"
    state: directory
  delegate_to: "{{ item }}"
  with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"

- name: "bootstrap/gen_ca | Generate {{ gen_ca_mount_path }} root CA"
  hashivault_write:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    secret: "{{ gen_ca_mount_path }}/root/generate/exported"
    data: "{{ gen_ca_vault_options }}"
  run_once: true
  no_log: true
  register: vault_ca_gen

- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA cert locally"
  copy:
    content: "{{ vault_ca_gen['data']['data']['certificate'] }}"
    dest: "{{ gen_ca_cert_dir }}/ca.pem"
    mode: 0644
  when: '"data" in vault_ca_gen.keys()'
  delegate_to: "{{ item }}"
  with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"


- name: "bootstrap/gen_ca | Copy {{ gen_ca_mount_path }} root CA key to necessary hosts"
  copy:
    content: "{{ vault_ca_gen['data']['data']['private_key']}}"
    dest: "{{ gen_ca_cert_dir }}/ca-key.pem"
    mode: 0640
  when: '"data" in vault_ca_gen.keys()'
  delegate_to: "{{ item }}"
  with_items: "{{ (groups[gen_ca_copy_group|default('vault')]) | union(groups['vault']) }}"
@@ -1,25 +0,0 @@
---
- name: shared/gen_userpass | Create the Username/Password combo for the role
  hashivault_userpass_create:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    name: "{{ gen_userpass_username }}"
    pass: "{{ gen_userpass_password }}"
    policies:
      - "{{ gen_userpass_role }}"
  run_once: true

- name: shared/gen_userpass | Ensure destination directory exists
  file:
    path: "{{ vault_roles_dir }}/{{ gen_userpass_role }}"
    state: directory

- name: shared/gen_userpass | Copy credentials to all hosts in the group
  copy:
    content: >
      {{
        {'username': gen_userpass_username,
         'password': gen_userpass_password} | to_nice_json(indent=4)
      }}
    dest: "{{ vault_roles_dir }}/{{ gen_userpass_role }}/userpass"
@@ -1,124 +0,0 @@
---

# This could be a role or custom module

# Vars:
#   issue_cert_alt_name: Requested Subject Alternative Names, in a list.
#   issue_cert_common_name: Common Name included in the cert
#   issue_cert_copy_ca: Copy issuing CA cert needed
#   issue_cert_ca_filename: Filename for copied issuing CA cert (default ca.pem)
#   issue_cert_dir_mode: Mode of the placed cert directory
#   issue_cert_file_group: Group of the placed cert file and directory
#   issue_cert_file_mode: Mode of the placed cert file
#   issue_cert_file_owner: Owner of the placed cert file and directory
#   issue_cert_format: Format for returned data. Can be pem, der, or pem_bundle
#   issue_cert_hosts: List of hosts to distribute the cert to
#   issue_cert_ip_sans: Requested IP Subject Alternative Names, in a list
#   issue_cert_mount_path: Mount point in Vault to make the request to
#   issue_cert_path: Full path to the cert, include its name
#   issue_cert_role: The Vault role to issue the cert with
#   issue_cert_url: Url to reach Vault, including protocol and port

- name: issue_cert | Ensure target directory exists
  file:
    path: "{{ issue_cert_path | dirname }}"
    state: directory
    group: "{{ issue_cert_file_group | d('root' )}}"
    mode: "{{ issue_cert_dir_mode | d('0755') }}"
    owner: "{{ issue_cert_file_owner | d('root') }}"

- name: "issue_cert | Read in the local credentials"
  command: cat {{ vault_roles_dir }}/{{ issue_cert_role }}/userpass
  register: vault_creds_cat
  delegate_to: "{{ groups.vault|first }}"
  run_once: true

- name: gen_certs_vault | Set facts for read Vault Creds
  set_fact:
    user_vault_creds: "{{ vault_creds_cat.stdout|from_json }}"
  delegate_to: "{{ groups.vault|first }}"
  run_once: true

- name: gen_certs_vault | Ensure vault cert dir exists
  file:
    path: "{{ vault_cert_dir }}"
    state: directory
    recurse: yes
    owner: "vault"
    group: "root"
    mode: 0755

- name: gen_certs_vault | install hvac
  pip:
    name: "hvac"
    state: "present"
    extra_args: "{{ pip_extra_args | default(omit) }}"

- name: gen_certs_vault | Pull vault CA
  get_url:
    url: "{{ issue_cert_url }}/v1/vault/ca/pem"
    dest: "{{ vault_cert_dir }}/ca.pem"
    validate_certs: no
  when: '"https" in issue_cert_url'

- name: gen_certs_vault | Log into Vault and obtain a scoped token
  hashivault_token_create:
    url: "{{ issue_cert_url }}"
    token: "{{ vault_root_token | default(hostvars[groups.vault|first]['vault_root_token']) }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    policies: "{{ user_vault_creds.username }}"
    display_name: "{{ user_vault_creds.username }}"
  register: vault_client_token_request
  run_once: true

- name: gen_certs_vault | Pull token from request
  set_fact:
    vault_client_token: "{{ vault_client_token_request['token']['auth']['client_token'] }}"
  run_once: true

- name: "issue_cert | Generate {{ issue_cert_path }} for {{ issue_cert_role }} role"
  hashivault_write:
    url: "{{ issue_cert_url }}"
    token: "{{ vault_client_token }}"
    ca_cert: "{% if 'https' in issue_cert_url %}{{ vault_cert_dir }}/ca.pem{% endif %}"
    secret: "{{ issue_cert_mount_path|d('/pki') }}/issue/{{ issue_cert_role }}"
    data:
      alt_names: "{{ issue_cert_alt_names | d([]) | join(',') }}"
      common_name: "{{ issue_cert_common_name | d(issue_cert_path.rsplit('/', 1)[1].rsplit('.', 1)[0]) }}"
      format: "{{ issue_cert_format | d('pem') }}"
      ip_sans: "{{ issue_cert_ip_sans | default([]) | join(',') }}"
  register: issue_cert_result
  run_once: "{{ issue_cert_run_once | d(false) }}"

- name: "issue_cert | Copy {{ issue_cert_path }} cert to all hosts"
  copy:
    content: "{{ issue_cert_result['data']['data']['certificate'] }}\n"
    dest: "{{ issue_cert_path }}"
    group: "{{ issue_cert_file_group | d('root' )}}"
    mode: "{{ issue_cert_file_mode | d('0644') }}"
    owner: "{{ issue_cert_file_owner | d('root') }}"

- name: "issue_cert | Copy key for {{ issue_cert_path }} to all hosts"
  copy:
    content: "{{ issue_cert_result['data']['data']['private_key'] }}"
    dest: "{{ issue_cert_path.rsplit('.', 1)|first }}-key.{{ issue_cert_path.rsplit('.', 1)|last }}"
    group: "{{ issue_cert_file_group | d('root' )}}"
    mode: "{{ issue_cert_file_mode | d('0640') }}"
    owner: "{{ issue_cert_file_owner | d('root') }}"

- name: issue_cert | Copy issuing CA cert
  copy:
    content: "{{ issue_cert_result['data']['data']['issuing_ca'] }}\n"
    dest: "{{ issue_cert_path | dirname }}/{{ issue_cert_ca_filename | default('ca.pem') }}"
    group: "{{ issue_cert_file_group | d('root' )}}"
    mode: "{{ issue_cert_file_mode | d('0644') }}"
    owner: "{{ issue_cert_file_owner | d('root') }}"
  when: issue_cert_copy_ca|default(false)

- name: issue_cert | Copy certificate serial to all hosts
  copy:
    content: "{{ issue_cert_result['data']['data']['serial_number'] }}"
    dest: "{{ issue_cert_path.rsplit('.', 1)|first }}.serial"
    group: "{{ issue_cert_file_group | d('root' )}}"
    mode: "{{ issue_cert_file_mode | d('0640') }}"
    owner: "{{ issue_cert_file_owner | d('root') }}"
@@ -1,12 +0,0 @@
---

- name: shared/mount | Enable {{ pki_mount_path }} PKI mount
  hashivault_secret_enable:
    url: "{{ vault_leader_url }}"
    token: "{{ vault_root_token }}"
    ca_cert: "{{ vault_cert_dir }}/ca.pem"
    name: "{{ pki_mount_path }}"
    backend: "pki"
    config: "{{ pki_mount_options }}"
  register: secret_enable_result
  failed_when: 'secret_enable_result.rc !=0 and "existing mount" not in secret_enable_result.msg'
@@ -1,46 +0,0 @@
---
- name: "sync_file | Cat the file"
  command: "cat {{ sync_file_path }}"
  register: sync_file_cat
  when: inventory_hostname == sync_file_srcs|first

- name: "sync_file | Cat the key file"
  command: "cat {{ sync_file_key_path }}"
  register: sync_file_key_cat
  when: sync_file_is_cert|d() and inventory_hostname == sync_file_srcs|first

- name: "sync_file | Set facts for file contents"
  set_fact:
    sync_file_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_cat', {}).get('stdout') }}"

- name: "sync_file | Set fact for key contents"
  set_fact:
    sync_file_key_contents: "{{ hostvars[sync_file_srcs|first].get('sync_file_key_cat', {}).get('stdout') }}"
  when: sync_file_is_cert|d()

- name: "sync_file | Ensure the directory exists"
  file:
    group: "{{ sync_file_group|d('root') }}"
    mode: "{{ sync_file_dir_mode|default('0750') }}"
    owner: "{{ sync_file_owner|d('root') }}"
    path: "{{ sync_file_dir }}"
    state: directory
  when: inventory_hostname not in sync_file_srcs

- name: "sync_file | Copy {{ sync_file_path }} to hosts that don't have it"
  copy:
    content: "{{ sync_file_contents }}"
    dest: "{{ sync_file_path }}"
    group: "{{ sync_file_group|d('root') }}"
    mode: "{{ sync_file_mode|default('0640') }}"
    owner: "{{ sync_file_owner|d('root') }}"
  when: inventory_hostname not in sync_file_srcs

- name: "sync_file | Copy {{ sync_file_key_path }} to hosts that don't have it"
  copy:
    content: "{{ sync_file_key_contents }}"
    dest: "{{ sync_file_key_path }}"
    group: "{{ sync_file_group|d('root') }}"
    mode: "{{ sync_file_mode|default('0640') }}"
    owner: "{{ sync_file_owner|d('root') }}"
  when: sync_file_is_cert|d() and inventory_hostname not in sync_file_srcs
@@ -1,17 +0,0 @@
---

- include_tasks: sync_file.yml
  vars:
    sync_file: "auth-ca.pem"
    sync_file_dir: "{{ vault_cert_dir }}"
    sync_file_hosts: "{{ groups.vault }}"
    sync_file_is_cert: true

- name: shared/sync_auth_certs | Set facts for vault sync_file results
  set_fact:
    vault_auth_ca_cert_needed: "{{ sync_file_results[0]['no_srcs'] }}"


- name: shared/sync_auth_certs | Unset sync_file_results after auth-ca.pem sync
  set_fact:
    sync_file_results: []
@@ -1,94 +0,0 @@
---

# NOTE: This should be a role (or custom module), but currently include_role is too buggy to use
- name: "sync_file | Set facts for directory and file when sync_file_path is defined"
  set_fact:
    sync_file_dir: "{{ sync_file_path | dirname }}"
    sync_file: "{{ sync_file_path | basename }}"
  when:
    - sync_file_path is defined
    - sync_file_path

- name: "sync_file | Set fact for sync_file_path when undefined"
  set_fact:
    sync_file_path: "{{ (sync_file_dir, sync_file)|join('/') }}"
  when: sync_file_path is not defined or not sync_file_path

- name: "sync_file | Set fact for key path name"
  set_fact:
    sync_file_key_path: "{{ sync_file_path.rsplit('.', 1)|first + '-key.' + sync_file_path.rsplit('.', 1)|last }}"
  when: sync_file_key_path is not defined or not sync_file_key_path

- name: "sync_file | Check if {{sync_file_path}} file exists"
  stat:
    path: "{{ sync_file_path }}"
  register: sync_file_stat

- name: "sync_file | Check if {{ sync_file_key_path }} key file exists"
  stat:
    path: "{{ sync_file_key_path }}"
  register: sync_file_key_stat

- name: "sync_file | Combine all possible file sync sources"
  set_fact:
    sync_file_srcs: "{{ sync_file_srcs|default([]) + [host_item] }}"
  with_items: "{{ sync_file_hosts|default() | unique }}"
  loop_control:
    loop_var: host_item
  when: sync_file_stat.stat.exists|default()

- name: "sync_file | Combine all possible key file sync sources"
  set_fact:
    sync_file_key_srcs: "{{ sync_file_key_srcs|default([]) + [host_item] }}"
  with_items: "{{ sync_file_hosts|default() | unique }}"
  loop_control:
    loop_var: host_item
  when: sync_file_key_stat.stat.exists|default()

- name: "sync_file | Remove sync sources with files that do not match sync_file_srcs|first"
  set_fact:
    _: "{% if inventory_hostname in sync_file_srcs %}{{ sync_file_srcs.remove(inventory_hostname) }}{% endif %}"
  when:
    - sync_file_srcs|d([])|length > 1
    - inventory_hostname != sync_file_srcs|first

- name: "sync_file | Remove sync sources with keys that do not match sync_file_srcs|first"
  set_fact:
    _: "{% if inventory_hostname in sync_file_srcs %}{{ sync_file_srcs.remove(inventory_hostname) }}{% endif %}"
  when:
    - sync_file_is_cert|d()
    - sync_file_key_srcs|d([])|length > 1
    - inventory_hostname != sync_file_key_srcs|first

- name: "sync_file | Consolidate file and key sources"
  set_fact:
    sync_file_srcs: "{{ sync_file_srcs|d([]) | intersect(sync_file_key_srcs) }}"
  when: sync_file_is_cert|d()

- name: "sync_file | Set facts for situations where sync is not needed"
  set_fact:
    sync_file_no_srcs: "{{ true if sync_file_srcs|d([])|length == 0 else false }}"
    sync_file_unneeded: "{{ true if sync_file_srcs|d([])|length == sync_file_hosts|length else false }}"

- name: "sync_file | Set sync_file_result fact"
  set_fact:
    sync_file_result:
      no_srcs: "{{ sync_file_no_srcs }}"
      path: "{{ sync_file_path }}"
      sync_unneeded: "{{ sync_file_unneeded }}"

- name: "sync_file | Update sync_file_results fact"
  set_fact:
    sync_file_results: "{{ sync_file_results|default([]) + [sync_file_result] }}"

- include_tasks: sync.yml
  when: not (sync_file_no_srcs or sync_file_unneeded)

- name: "Unset local vars to avoid variable bleed into next iteration"
  set_fact:
    sync_file: ''
    sync_file_dir: ''
    sync_file_key_path: ''
    sync_file_key_srcs: []
    sync_file_path: ''
    sync_file_srcs: []
@@ -1,35 +0,0 @@
[Unit]
Description=hashicorp vault on docker
Documentation=https://github.com/hashicorp/vault
Wants=docker.socket
After=docker.service

[Service]
User=root
Restart=always
RestartSec=15s
TimeoutStartSec=5
LimitNOFILE=10000
ExecReload={{ docker_bin_dir }}/docker restart {{ vault_container_name }}
ExecStop={{ docker_bin_dir }}/docker stop {{ vault_container_name }}
ExecStartPre=-{{ docker_bin_dir }}/docker rm -f {{ vault_container_name }}
# Container has the following internal mount points:
# /vault/file/   # File backend storage location
# /vault/logs/   # Log files
ExecStart={{ docker_bin_dir }}/docker run \
    --name {{ vault_container_name }} --net=host \
    --cap-add=IPC_LOCK \
    -v {{ vault_cert_dir }}:{{ vault_cert_dir }} \
    -v {{ vault_config_dir }}:{{ vault_config_dir }} \
    -v /etc/ssl:/etc/ssl \
    -v {{ etcd_cert_dir }}:{{ etcd_cert_dir }} \
    -v {{ vault_log_dir }}:/vault/logs \
    -v {{ vault_roles_dir }}:{{ vault_roles_dir }} \
    -v {{ vault_secrets_dir }}:{{ vault_secrets_dir }} \
    --entrypoint=vault \
    {{ vault_image_repo }}:{{ vault_image_tag }} \
    server --config={{ vault_config_dir }}/config.json \
    --log-level=trace

[Install]
WantedBy=multi-user.target
@@ -1,15 +0,0 @@
[Unit]
Description=vault
After=network.target

[Service]
AmbientCapabilities=CAP_IPC_LOCK
ExecStart={{ bin_dir }}/vault server --config={{ vault_config_dir }}/config.json
LimitNOFILE=40000
NotifyAccess=all
Restart=always
RestartSec=10s
User={{ vault_adduser_vars.name }}

[Install]
WantedBy=multi-user.target
@@ -1,2 +0,0 @@
[Service]
Environment={% if http_proxy %}"HTTP_PROXY={{ http_proxy }}"{% endif %} {% if https_proxy %}"HTTPS_PROXY={{ https_proxy }}"{% endif %} {% if no_proxy %}"NO_PROXY={{ no_proxy }}"{% endif %}
@@ -1,98 +0,0 @@
Hashicorp Vault Role
====================

The vault role has been retired from the main playbook.
This role probably requires a LOT of changes in order to work again.

Overview
--------

The Vault role is a two-step process:

1. Bootstrap

You cannot start your certificate management service securely with SSL (and
the datastore behind it) without having the certificates in-hand already. This
presents an unfortunate chicken-and-egg scenario, with one requiring the other.
To solve for this, the Bootstrap step was added.

This step spins up a temporary instance of Vault to issue certificates for
Vault itself. It then leaves the temporary instance running, so that the Etcd
role can generate certs for itself as well. Eventually, this may be improved
to allow alternate backends (such as Consul), but currently the tasks are
hardcoded to only create a Vault role for Etcd.

1. Cluster

This step is where the long-term Vault cluster is started and configured. Its
first task is to stop any temporary instances of Vault, to free the port for
the long-term cluster. At the end of this step, the entire Vault cluster should
be up and ready to go.

Keys to the Kingdom
-------------------

The two most important security pieces of Vault are the ``root_token``
and ``unsealing_keys``. Both of these values are given exactly once, during
the initialization of the Vault cluster. For convenience, they are saved
to the ``vault_secret_dir`` (default: /etc/vault/secrets) of every host in the
vault group.

It is *highly* recommended that these secrets are removed from the servers after
your cluster has been deployed and kept in a safe location of your choosing (a
removal sketch follows below). Naturally, the seriousness of the situation
depends on what you're doing with your Kubespray cluster, but with these
secrets, an attacker will have the ability to authenticate to almost everything
in Kubernetes and decode all private (HTTPS) traffic on your network signed by
Vault certificates.

For even greater security, you may want to remove and store elsewhere any
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).

Vault by default encrypts all traffic to and from the datastore backend, all
resting data, and uses TLS for its TCP listener. It is recommended that you
do not change the Vault config to disable TLS, unless you absolutely have to.
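A minimal sketch of that cleanup, assuming the default ``/etc/vault/secrets``
location and the ``vault`` inventory group (the secret file names in the loop
are illustrative rather than defined by the role), could look like this:

```yaml
# Hypothetical post-deployment playbook: pull the generated secrets to the
# controller for offline storage, then delete them from the Vault hosts.
- hosts: vault
  become: true
  tasks:
    - name: Fetch Vault secrets for offline storage
      fetch:
        src: "/etc/vault/secrets/{{ item }}"
        dest: "vault-secrets-backup/"
      loop:
        - root_token     # illustrative file name; check vault_secret_dir for the real layout
        - unseal_keys    # illustrative file name

    - name: Remove the secrets directory from the servers
      file:
        path: /etc/vault/secrets
        state: absent
```
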
Usage
-----

To get the Vault role running, you must do two things at a minimum:

1. Assign the ``vault`` group to at least 1 node in your inventory
1. Change ``cert_management`` to be ``vault`` instead of ``script`` (see the sketch below)
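
For illustration only, the ``cert_management`` switch is a one-line change in
any group_vars file that applies to the cluster; the file path in the comment
below is an assumption:

```yaml
# group_vars/k8s-cluster/k8s-cluster.yml (hypothetical location)
# Hand certificate generation over to the Vault role instead of the scripts.
cert_management: vault
```

The ``vault`` group itself is ordinary inventory, for example a ``[vault]``
section listing one or more control plane nodes, as the ``[vault]`` sections in
the sample inventories show.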

Nothing else is required, but customization is possible. Check
``roles/vault/defaults/main.yml`` for the different variables that can be
overridden, most common being ``vault_config``, ``vault_port``, and
``vault_deployment_type``.

The Vault role will create separate root CAs for `etcd`, `kubernetes` and `vault`.
Also, if you intend to use a Root or Intermediate CA generated elsewhere, you'll
need to copy the certificate and key to the hosts in the vault group prior to
running the vault role. By default, they'll be located at:

* vault:
  * ``/etc/vault/ssl/ca.pem``
  * ``/etc/vault/ssl/ca-key.pem``
* etcd:
  * ``/etc/ssl/etcd/ssl/ca.pem``
  * ``/etc/ssl/etcd/ssl/ca-key.pem``
* kubernetes:
  * ``/etc/kubernetes/ssl/ca.pem``
  * ``/etc/kubernetes/ssl/ca-key.pem``

Additional Notes:

* ``groups.vault|first`` is considered the source of truth for Vault variables
* ``vault_leader_url`` is used as a pointer to the currently running Vault
* Each service should have its own role and credentials. Currently those
  credentials are saved to ``/etc/vault/roles/<role>/``. Each service needs to
  read in those credentials if it wants to interact with Vault (see the sketch
  below).
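
A minimal sketch of reading those credentials back from Ansible, mirroring the
``cat ... | from_json`` pattern used in the issue_cert tasks; the ``etcd`` role
name and the variable names are illustrative only:

```yaml
# Hypothetical consumer-side tasks; the path layout comes from the role
# (/etc/vault/roles/<role>/userpass holds {"username": ..., "password": ...}).
- name: Read the etcd role's Vault credentials
  slurp:
    src: /etc/vault/roles/etcd/userpass
  register: etcd_vault_creds_raw

- name: Expose the username and password as a fact
  set_fact:
    etcd_vault_creds: "{{ etcd_vault_creds_raw.content | b64decode | from_json }}"
```
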
Potential Work
--------------

* Change the Vault role to not run certain tasks when ``root_token`` and
  ``unseal_keys`` are not present. Alternatively, allow user input for these
  values when missing.
* Add the ability to start temp Vault with Host or Docker
* Add a dynamic way to change out the backend role creation during Bootstrap,
  so other services can be used (such as Consul)
@@ -311,12 +311,6 @@ Upgrade etcd:
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=etcd
```

Upgrade vault:

```ShellSession
ansible-playbook -b -i inventory/sample/hosts.ini cluster.yml --tags=vault
```

Upgrade kubelet:

```ShellSession
@@ -88,7 +88,6 @@ no_proxy_exclude_workers: false
## This setting determines whether certs are generated via scripts.
## Chose 'none' if you provide your own certificates.
## Option is "script", "none"
## note: vault is removed
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
@@ -55,7 +55,7 @@ etcd_memory_limit: "{% if ansible_memtotal_mb < 4096 %}512M{% else %}0{% endif %

etcd_blkio_weight: 1000

etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) | union(groups.get('vault', [])) }}"
etcd_node_cert_hosts: "{{ groups['k8s-cluster'] | union(groups.get('calico-rr', [])) }}"

etcd_compaction_retention: "8"

@@ -251,13 +251,6 @@
  when: kube_proxy_mode is defined
  run_once: true

- name: Stop if vault is chose
  assert:
    that: cert_management != 'vault'
    msg: "Support for vault have been removed, please use 'script' or 'none'"
  when: cert_management is defined
  run_once: true

- name: Stop if unknown cert_management
  assert:
    that: cert_management|d('script') in ['script', 'none']
@@ -5,7 +5,6 @@
    state: stopped
  with_items:
    - kubelet
    - vault
  failed_when: false
  tags:
    - services
@@ -16,11 +15,9 @@
    state: absent
  with_items:
    - kubelet.service
    - vault.service
    - calico-node.service
    - containerd.service.d/http-proxy.conf
    - crio.service.d/http-proxy.conf
    - vault.service.d/http-proxy.conf
    - k8s-certs-renew.service
    - k8s-certs-renew.timer
  register: services_removed
@@ -270,14 +267,10 @@
    - /run/kubernetes
    - /usr/local/share/ca-certificates/etcd-ca.crt
    - /usr/local/share/ca-certificates/kube-ca.crt
    - /usr/local/share/ca-certificates/vault-ca.crt
    - /etc/ssl/certs/etcd-ca.pem
    - /etc/ssl/certs/kube-ca.pem
    - /etc/ssl/certs/vault-ca.crt
    - /etc/pki/ca-trust/source/anchors/etcd-ca.crt
    - /etc/pki/ca-trust/source/anchors/kube-ca.crt
    - /etc/pki/ca-trust/source/anchors/vault-ca.crt
    - /etc/vault
    - /var/log/pods/
    - "{{ bin_dir }}/kubelet"
    - "{{ bin_dir }}/etcd-scripts"
@@ -56,9 +56,6 @@ instance-1

[etcd]
instance-1

[vault]
instance-1
{% elif mode == "ha-recover" %}
[kube_control_plane]
instance-1
@@ -15,10 +15,6 @@ node3
node1
node2

[vault]
node1
node2

[k8s-cluster:children]
kube-node
kube_control_plane
@@ -11,9 +11,6 @@

[etcd]
{{droplets.results[2].droplet.name}}

[vault]
{{droplets.results[2].droplet.name}}
{% elif mode is defined and mode == "ha" %}
[kube_control_plane]
{{droplets.results[0].droplet.name}}
@@ -26,10 +23,6 @@
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}

[vault]
{{droplets.results[1].droplet.name}}
{{droplets.results[2].droplet.name}}

[broken_kube_control_plane]
{{droplets.results[1].droplet.name}}

@@ -44,9 +37,6 @@

[etcd]
{{droplets.results[0].droplet.name}}

[vault]
{{droplets.results[0].droplet.name}}
{% endif %}

[calico-rr]
@@ -18,8 +18,6 @@
[etcd]
{{node3}}

[vault]
{{node3}}
{% elif mode is defined and mode in ["ha", "ha-scale"] %}
[kube_control_plane]
{{node1}}
@@ -33,11 +31,6 @@
{{node2}}
{{node3}}

[vault]
{{node1}}
{{node2}}
{{node3}}

[broken_kube_control_plane]
{{node2}}

@@ -53,9 +46,6 @@

[etcd]
{{node1}}

[vault]
{{node1}}
{% elif mode == "aio" %}
[kube_control_plane]
{{node1}}
@@ -65,9 +55,6 @@

[etcd]
{{node1}}

[vault]
{{node1}}
{% endif %}

[k8s-cluster:children]