Merge branch 'master' into issue_1107-docker-versioning

David Crook committed 2017-03-06 16:00:31 -07:00
commit b1d701ae47
39 changed files with 416 additions and 146 deletions

Vagrantfile (vendored, 3 changes)

@@ -23,6 +23,7 @@ $etcd_instances = $num_instances
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
# All nodes are kube nodes
$kube_node_instances = $num_instances
+$local_release_dir = "/vagrant/temp"
host_vars = {}
@@ -97,7 +98,7 @@ Vagrant.configure("2") do |config|
"ip": ip, "ip": ip,
"flannel_interface": ip, "flannel_interface": ip,
"flannel_backend_type": "host-gw", "flannel_backend_type": "host-gw",
"local_release_dir": "/vagrant/temp", "local_release_dir" => $local_release_dir,
"download_run_once": "False", "download_run_once": "False",
# Override the default 'calico' with flannel. # Override the default 'calico' with flannel.
# inventory/group_vars/k8s-cluster.yml # inventory/group_vars/k8s-cluster.yml

@@ -39,17 +39,17 @@
    - { role: kargo-defaults, when: "cert_management == 'vault'" }
    - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }

-- hosts: etcd:!k8s-cluster
+- hosts: etcd
  any_errors_fatal: true
  roles:
    - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true }

- hosts: k8s-cluster
  any_errors_fatal: true
  roles:
    - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }

- hosts: etcd:k8s-cluster:vault
  any_errors_fatal: true

@@ -0,0 +1,27 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    }
  ]
}

@@ -0,0 +1,10 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}

@@ -0,0 +1,45 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}

@@ -0,0 +1,10 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}

docs/atomic.md (new file, 22 lines)

@@ -0,0 +1,22 @@
Atomic host bootstrap
=====================

Atomic host testing has been done with the flannel network plugin. Set the inventory var `kube_network_plugin: flannel`.

Note: Flannel is the only network plugin that has currently been tested with Atomic.

### Vagrant

* For bootstrapping with Vagrant, use the box `centos/atomic-host`.
* Update the Vagrantfile variable `local_release_dir` to `/var/vagrant/temp`.
* Set `vm_memory = 2048` and `vm_cpus = 2`.
* Networking on Vagrant hosts has to be brought up manually once they are booted:

```
vagrant ssh
sudo /sbin/ifup enp0s8
```

* For users of vagrant-libvirt, download the qcow2 image from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/

Then you can proceed to [cluster deployment](#run-deployment).
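For reference, this is the whole inventory change the flannel note above asks for; a minimal sketch (the group-vars path comes from the Vagrantfile comment earlier in this commit):

```
# inventory/group_vars/k8s-cluster.yml
kube_network_plugin: flannel
```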

@@ -3,7 +3,7 @@ AWS
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.

-Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.

The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
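A minimal sketch of such an inventory entry (the IP mirrors the doc's own placeholder; the SSH user is illustrative, not from this commit):

```
[kube-master]
ip-111-222-333-444.us-west-2.compute.internal ansible_ssh_host=111.222.333.444 ansible_ssh_user=admin
```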

@@ -102,4 +102,3 @@ Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
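A minimal sketch of overriding that default (the variable name comes from the text above; the file placement is an assumption):

```
# e.g. inventory/group_vars/k8s-cluster.yml (placement assumed)
kube_api_pwd: "s0mesecret"
```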

@@ -47,7 +47,7 @@
## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure' or 'openstack'
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', or 'vsphere'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
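Enabling one of the listed providers is then a one-line uncomment; an illustrative value:

```
cloud_provider: vsphere
```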

@@ -9,3 +9,11 @@
  when: bootstrap_os == "centos"

- include: setup-pipelining.yml
+
+- name: check if atomic host
+  stat:
+    path: /run/ostree-booted
+  register: ostree
+
+- set_fact:
+    is_atomic: "{{ ostree.stat.exists }}"

@@ -38,7 +38,7 @@
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ docker_repo_key_info.repo_keys }}"
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)

- name: ensure docker repository is enabled
  action: "{{ docker_repo_info.pkg_repo }}"

@@ -46,13 +46,13 @@
repo: "{{item}}" repo: "{{item}}"
state: present state: present
with_items: "{{ docker_repo_info.repos }}" with_items: "{{ docker_repo_info.repos }}"
when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_repo_info.repos|length > 0) when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_repo_info.repos|length > 0)
- name: Configure docker repository on RedHat/CentOS - name: Configure docker repository on RedHat/CentOS
template: template:
src: "rh_docker.repo.j2" src: "rh_docker.repo.j2"
dest: "/etc/yum.repos.d/docker.repo" dest: "/etc/yum.repos.d/docker.repo"
when: ansible_distribution in ["CentOS","RedHat"] when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic

- name: ensure docker packages are installed
  action: "{{ docker_package_info.pkg_mgr }}"

@@ -66,7 +66,7 @@
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{ docker_package_info.pkgs }}"
  notify: restart docker
-  when: (not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]) and (docker_package_info.pkgs|length > 0)
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic) and (docker_package_info.pkgs|length > 0)

- name: check minimum docker version for docker_dns mode. You need at least docker version >= 1.12 for resolvconf_mode=docker_dns
  command: "docker version -f '{{ '{{' }}.Client.Version{{ '}}' }}'"

@@ -15,7 +15,14 @@
    src: docker.service.j2
    dest: /etc/systemd/system/docker.service
  register: docker_service_file
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
+
+- name: Write docker.service systemd file for atomic
+  template:
+    src: docker_atomic.service.j2
+    dest: /etc/systemd/system/docker.service
+  notify: restart docker
+  when: is_atomic

- name: Write docker options systemd drop-in
  template:

@@ -0,0 +1,38 @@
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target
Wants=docker-storage-setup.service

[Service]
Type=notify
NotifyAccess=all
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
Environment=GOTRACEBACK=crash
Environment=DOCKER_HTTP_HOST_COMPAT=1
Environment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin
ExecReload=/bin/kill -s HUP $MAINPID
Delegate=yes
KillMode=process
ExecStart=/usr/bin/dockerd-current \
--add-runtime docker-runc=/usr/libexec/docker/docker-runc-current \
--default-runtime=docker-runc \
--exec-opt native.cgroupdriver=systemd \
--userland-proxy-path=/usr/libexec/docker/docker-proxy-current \
$DOCKER_OPTS \
$DOCKER_STORAGE_OPTIONS \
$DOCKER_NETWORK_OPTIONS \
$DOCKER_DNS_OPTIONS \
$ADD_REGISTRY \
$BLOCK_REGISTRY \
$INSECURE_REGISTRY
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=1min
Restart=on-abnormal

[Install]
WantedBy=multi-user.target

@@ -1,4 +1,7 @@
---
+# Set to false to only do certificate management
+etcd_cluster_setup: true
+
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
etcd_data_dir: "/var/lib/etcd"

@@ -2,7 +2,7 @@
dependencies:
  - role: adduser
    user: "{{ addusers.etcd }}"
-    when: not ansible_os_family in ['CoreOS', 'Container Linux by CoreOS']
+    when: not (ansible_os_family in ['CoreOS', 'Container Linux by CoreOS'] or is_atomic)
  - role: download
    file: "{{ downloads.etcd }}"
    tags: download

@@ -1,18 +1,11 @@
---
- name: "Check_certs | check if all certs have already been generated on first master"
-  stat:
-    path: "{{ etcd_cert_dir }}/{{ item }}"
-    get_md5: no
+  find:
+    paths: "{{ etcd_cert_dir }}"
+    patterns: "ca.pem,node*.pem"
  delegate_to: "{{groups['etcd'][0]}}"
  register: etcdcert_master
  run_once: true
-  with_items: >-
-    ['ca.pem',
-    {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique %}
-    {% for host in all_etcd_hosts %}
-    'node-{{ host }}-key.pem'
-    {% if not loop.last %}{{','}}{% endif %}
-    {% endfor %}]
- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs' and 'etcd_secret_changed' to false" - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs' and 'etcd_secret_changed' to false"
set_fact: set_fact:
@ -20,34 +13,56 @@
    gen_certs: false
    etcd_secret_changed: false

-- name: "Check_certs | Set 'gen_certs' to true"
-  set_fact:
-    gen_certs: true
-  when: "not {{item.stat.exists}}"
-  run_once: true
-  with_items: "{{etcdcert_master.results}}"
-
-- name: "Check certs | check if a cert already exists"
+- name: "Check certs | check if a cert already exists on node"
  stat:
    path: "{{ etcd_cert_dir }}/{{ item }}"
-  register: etcdcert
+  register: etcdcert_node
  with_items:
    - ca.pem
    - node-{{ inventory_hostname }}-key.pem

+- name: "Check_certs | Set 'gen_certs' to true"
+  set_fact:
+    gen_certs: true
+  when: "not '{{ item }}' in etcdcert_master.files|map(attribute='path') | list"
+  run_once: true
+  with_items: >-
+    ['{{etcd_cert_dir}}/ca.pem',
+    {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort %}
+    {% for host in all_etcd_hosts %}
+    '{{etcd_cert_dir}}/node-{{ host }}-key.pem'
+    {% if not loop.last %}{{','}}{% endif %}
+    {% endfor %}]
+
+- name: "Check_certs | Set 'gen_node_certs' to true"
+  set_fact:
+    gen_node_certs: |-
+      {
+      {% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique|sort -%}
+      {% set existing_certs = etcdcert_master.files|map(attribute='path')|list|sort %}
+      {% for host in all_etcd_hosts -%}
+      {% set host_cert = "%s/node-%s-key.pem"|format(etcd_cert_dir, host) %}
+      {% if host_cert in existing_certs -%}
+      "{{ host }}": False,
+      {% else -%}
+      "{{ host }}": True,
+      {% endif -%}
+      {% endfor %}
+      }
+  run_once: true
- name: "Check_certs | Set 'sync_certs' to true" - name: "Check_certs | Set 'sync_certs' to true"
set_fact: set_fact:
sync_certs: true sync_certs: true
when: >- when: >-
{%- set certs = {'sync': False} -%} {%- set certs = {'sync': False} -%}
{% set all_etcd_hosts = groups['k8s-cluster']|union(groups['etcd'])|union(groups['calico-rr']|default([]))|unique %} {% if gen_node_certs[inventory_hostname] or
{% for host in all_etcd_hosts %} (not etcdcert_node.results[0].stat.exists|default(False)) or
{% if host == inventory_hostname %} (not etcdcert_node.results[1].stat.exists|default(False)) or
{% if (not etcdcert.results[0].stat.exists|default(False)) or (etcdcert_node.results[1].stat.checksum|default('') != etcdcert_master.files|selectattr("path", "equalto", etcdcert_node.results[1].stat.path)|first|map(attribute="checksum")|default('')) -%}
(not etcdcert.results[1].stat.exists|default(False)) or
(etcdcert.results[1].stat.checksum|default('') != etcdcert_master.results[loop.index].stat.checksum|default('')) -%}
{%- set _ = certs.update({'sync': True}) -%} {%- set _ = certs.update({'sync': True}) -%}
{% endif %} {% endif %}
{% endif %}
{%- endfor -%}
{{ certs.sync }} {{ certs.sync }}

@@ -43,15 +43,15 @@
  when: gen_certs|default(false)

- name: Gen_certs | run cert generation script
-  command: "{{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
+  command: "bash -x {{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
  environment:
    - MASTERS: "{% for m in groups['etcd'] %}
-        {% if hostvars[m].sync_certs|default(false) %}
+        {% if gen_node_certs[m] %}
        {{ m }}
        {% endif %}
        {% endfor %}"
    - HOSTS: "{% for h in (groups['k8s-cluster'] + groups['calico-rr']|default([]))|unique %}
-        {% if hostvars[h].sync_certs|default(false) %}
+        {% if gen_node_certs[h] %}
        {{ h }}
        {% endif %}
        {% endfor %}"
@@ -107,12 +107,36 @@
    sync_certs|default(false) and inventory_hostname not in groups['etcd']
  notify: set etcd_secret_changed

-- name: Gen_certs | Copy certs on masters
-  shell: "base64 -d <<< '{{etcd_master_cert_data.stdout|quote}}' | tar xz -C {{ etcd_cert_dir }}"
-  args:
-    executable: /bin/bash
+#NOTE(mattymo): Use temporary file to copy master certs because we have a ~200k
+#char limit when using shell command
+
+#FIXME(mattymo): Use tempfile module in ansible 2.3
+- name: Gen_certs | Prepare tempfile for unpacking certs
+  shell: mktemp /tmp/certsXXXXX.tar.gz
+  register: cert_tempfile
+
+- name: Gen_certs | Write master certs to tempfile
+  copy:
+    content: "{{etcd_master_cert_data.stdout}}"
+    dest: "{{cert_tempfile.stdout}}"
+    owner: root
+    mode: "0600"
+  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
+        inventory_hostname != groups['etcd'][0]
+
+- name: Gen_certs | Unpack certs on masters
+  shell: "base64 -d < {{ cert_tempfile.stdout }} | tar xz -C {{ etcd_cert_dir }}"
  no_log: true
  changed_when: false
+  check_mode: no
+  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
+        inventory_hostname != groups['etcd'][0]
+  notify: set secret_changed
+
+- name: Gen_certs | Cleanup tempfile
+  file:
+    path: "{{cert_tempfile.stdout}}"
+    state: absent
  when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
        inventory_hostname != groups['etcd'][0]
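In plain shell, the tempfile round-trip above looks roughly like this; a sketch under assumptions ($CERT_BLOB stands in for the registered `etcd_master_cert_data.stdout`, and `/etc/ssl/etcd/ssl` for `etcd_cert_dir`, whose value is not shown in this diff):

```
TMP=$(mktemp /tmp/certs.XXXXXX)                    # prepare tempfile (template adjusted to a valid mktemp form)
printf '%s' "$CERT_BLOB" > "$TMP"                  # write certs to tempfile, dodging the ~200k shell-arg limit
base64 -d < "$TMP" | tar xz -C /etc/ssl/etcd/ssl   # unpack certs on the master
rm -f "$TMP"                                       # cleanup tempfile
```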
@@ -163,4 +187,3 @@
- name: Gen_certs | update ca-certificates (RedHat)
  command: update-ca-trust extract
  when: etcd_ca_cert.changed and ansible_os_family == "RedHat"

@@ -1,9 +0,0 @@
----
-- name: Install | Copy etcd binary from downloaddir
-  command: rsync -piu "{{ etcd_bin_dir }}/etcd" "{{ bin_dir }}/etcd"
-  register: etcd_copy
-  changed_when: false
-
-- name: Install | Copy etcdctl binary from downloaddir
-  command: rsync -piu "{{ etcd_bin_dir }}/etcdctl" "{{ bin_dir }}/etcdctl"
-  changed_when: false

@@ -1,5 +1,6 @@
---
- include: pre_upgrade.yml
+  when: etcd_cluster_setup
  tags: etcd-pre-upgrade

- include: check_certs.yml
@@ -27,19 +28,18 @@
  tags: upgrade

- include: set_cluster_health.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

- include: configure.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

- include: refresh_config.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

-- name: Restart etcd if binary or certs changed
+- name: Restart etcd if certs changed
  command: /bin/true
  notify: restart etcd
-  when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
-        or etcd_secret_changed|default(false)
+  when: is_etcd_master and etcd_secret_changed|default(false)

# reload-systemd
- meta: flush_handlers
@@ -49,13 +49,13 @@
    name: etcd
    state: started
    enabled: yes
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

# After etcd cluster is assembled, make sure that
# initial state of the cluster is in `existing`
# state instead of `new`.
- include: set_cluster_health.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

- include: refresh_config.yml
-  when: is_etcd_master
+  when: is_etcd_master and etcd_cluster_setup

@@ -2,4 +2,4 @@
- include: centos-7.yml
  when: ansible_distribution in ["CentOS","RedHat"] and
-        ansible_distribution_major_version >= 7
+        ansible_distribution_major_version >= 7 and not is_atomic

@@ -52,7 +52,7 @@ spec:
    {% endif %}
    - --v={{ kube_log_level }}
    - --allow-privileged=true
-    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
    - --cloud-provider={{ cloud_provider }}
    - --cloud-config={{ kube_config_dir }}/cloud_config
    {% elif cloud_provider is defined and cloud_provider == "aws" %}

@@ -32,7 +32,7 @@ spec:
    - --node-monitor-period={{ kube_controller_node_monitor_period }}
    - --pod-eviction-timeout={{ kube_controller_pod_eviction_timeout }}
    - --v={{ kube_log_level }}
-    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
    - --cloud-provider={{cloud_provider}}
    - --cloud-config={{ kube_config_dir }}/cloud_config
    {% elif cloud_provider is defined and cloud_provider == "aws" %}
@@ -54,7 +54,7 @@ spec:
    - mountPath: {{ kube_cert_dir }}
      name: ssl-certs-kubernetes
      readOnly: true
-    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+    {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere" ] %}
    - mountPath: {{ kube_config_dir }}/cloud_config
      name: cloudconfig
      readOnly: true
@@ -63,7 +63,7 @@ spec:
  - hostPath:
      path: {{ kube_cert_dir }}
    name: ssl-certs-kubernetes
-  {% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+  {% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
  - hostPath:
      path: {{ kube_config_dir }}/cloud_config
    name: cloudconfig

@@ -42,7 +42,7 @@ KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kuben
{% endif %}

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"
-{% if cloud_provider is defined and cloud_provider in ["openstack", "azure"] %}
+{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
{% elif cloud_provider is defined and cloud_provider == "aws" %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"

@@ -50,7 +50,11 @@ spec:
  volumes:
    - name: ssl-certs-host
      hostPath:
+{% if ansible_os_family == 'RedHat' %}
+        path: /etc/pki/tls
+{% else %}
        path: /usr/share/ca-certificates
+{% endif %}
    - name: "kubeconfig"
      hostPath:
        path: "{{kube_config_dir}}/node-kubeconfig.yaml"

@@ -29,6 +29,22 @@ openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID')|default(lookup('env','OS_PROJECT_ID'),true) }}"

+# For the vsphere integration, kubelet will need credentials to access
+# vsphere apis
+# Documentation regarding these values can be found
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/providers/vsphere/vsphere.go#L105
+vsphere_vcenter_ip: "{{ lookup('env', 'VSPHERE_VCENTER') }}"
+vsphere_vcenter_port: "{{ lookup('env', 'VSPHERE_VCENTER_PORT') }}"
+vsphere_user: "{{ lookup('env', 'VSPHERE_USER') }}"
+vsphere_password: "{{ lookup('env', 'VSPHERE_PASSWORD') }}"
+vsphere_datacenter: "{{ lookup('env', 'VSPHERE_DATACENTER') }}"
+vsphere_datastore: "{{ lookup('env', 'VSPHERE_DATASTORE') }}"
+vsphere_working_dir: "{{ lookup('env', 'VSPHERE_WORKING_DIR') }}"
+vsphere_insecure: "{{ lookup('env', 'VSPHERE_INSECURE') }}"
+vsphere_scsi_controller_type: pvscsi
+# vsphere_public_network is name of the network the VMs are joined to
+vsphere_public_network: "{{ lookup('env', 'VSPHERE_PUBLIC_NETWORK')|default('') }}"

# Container Linux by CoreOS cloud init config file to define /etc/resolv.conf content
# for hostnet pods and infra needs
resolveconf_cloud_init_conf: /etc/resolveconf_cloud_init.conf
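Since every value above is an environment lookup, the credentials are exported before running the playbook; a hypothetical session (all values illustrative):

```
export VSPHERE_VCENTER=vcenter.example.com
export VSPHERE_VCENTER_PORT=443
export VSPHERE_USER=k8s-admin@vsphere.local
export VSPHERE_PASSWORD='changeme'
export VSPHERE_DATACENTER=dc1
export VSPHERE_DATASTORE=ds1
export VSPHERE_WORKING_DIR=kubernetes
export VSPHERE_INSECURE=1
export VSPHERE_PUBLIC_NETWORK='VM Network'
ansible-playbook -i <your-inventory> cluster.yml
```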

@@ -3,3 +3,4 @@ dependencies:
  - role: adduser
    user: "{{ addusers.kube }}"
    tags: kubelet
+    when: not is_atomic

@@ -17,7 +17,7 @@
line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}" line: "{{ loadbalancer_apiserver.address }} {{ apiserver_loadbalancer_domain_name| default('lb-apiserver.kubernetes.local') }}"
state: present state: present
backup: yes backup: yes
when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined when: loadbalancer_apiserver is defined and loadbalancer_apiserver.address is defined and apiserver_loadbalancer_domain_name is defined
- name: Hosts | localhost ipv4 in hosts file - name: Hosts | localhost ipv4 in hosts file
lineinfile: lineinfile:

@@ -64,17 +64,13 @@
- name: check cloud_provider value
  fail:
-    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure' or 'openstack'"
-  when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'openstack', 'azure']
+    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack' or 'vsphere'"
+  when: cloud_provider is defined and cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere']
  tags: [cloud-provider, facts]

-- include: openstack-credential-check.yml
-  when: cloud_provider is defined and cloud_provider == 'openstack'
-  tags: [cloud-provider, openstack, facts]
-
-- include: azure-credential-check.yml
-  when: cloud_provider is defined and cloud_provider == 'azure'
-  tags: [cloud-provider, azure, facts]
+- include: "{{ cloud_provider }}-credential-check.yml"
+  when: cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags: [cloud-provider, facts]

- name: Create cni directories
  file:
@@ -91,7 +87,7 @@
  yum:
    update_cache: yes
    name: '*'
-  when: ansible_pkg_mgr == 'yum'
+  when: ansible_pkg_mgr == 'yum' and not is_atomic
  tags: bootstrap-os

- name: Install latest version of python-apt for Debian distribs
@@ -112,7 +108,7 @@
- name: Install epel-release on RedHat/CentOS
  shell: rpm -qa | grep epel-release || rpm -ivh {{ epel_rpm_download_url }}
-  when: ansible_distribution in ["CentOS","RedHat"]
+  when: ansible_distribution in ["CentOS","RedHat"] and not is_atomic
  changed_when: False
  check_mode: no
  tags: bootstrap-os
@@ -127,7 +123,7 @@
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  with_items: "{{required_pkgs | default([]) | union(common_required_pkgs|default([]))}}"
-  when: not ansible_os_family in ["CoreOS", "Container Linux by CoreOS"]
+  when: not (ansible_os_family in ["CoreOS", "Container Linux by CoreOS"] or is_atomic)
  tags: bootstrap-os

# Todo : selinux configuration
@@ -179,23 +175,14 @@
    state: present
  tags: bootstrap-os

-- name: Write openstack cloud-config
+- name: Write cloud-config
  template:
-    src: openstack-cloud-config.j2
+    src: "{{ cloud_provider }}-cloud-config.j2"
    dest: "{{ kube_config_dir }}/cloud_config"
    group: "{{ kube_cert_group }}"
    mode: 0640
-  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "openstack"
-  tags: [cloud-provider, openstack]
-
-- name: Write azure cloud-config
-  template:
-    src: azure-cloud-config.j2
-    dest: "{{ kube_config_dir }}/cloud_config"
-    group: "{{ kube_cert_group }}"
-    mode: 0640
-  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider == "azure"
-  tags: [cloud-provider, azure]
+  when: inventory_hostname in groups['k8s-cluster'] and cloud_provider is defined and cloud_provider in [ 'openstack', 'azure', 'vsphere' ]
+  tags: [cloud-provider]

- include: etchosts.yml
  tags: [bootstrap-os, etchosts]

@@ -22,7 +22,7 @@
kube_apiserver_endpoint: |-
  {% if not is_kube_master and loadbalancer_apiserver_localhost|default(false) -%}
  https://localhost:{{ nginx_kube_apiserver_port|default(kube_apiserver_port) }}
-  {%- elif is_kube_master and loadbalancer_apiserver is not defined -%}
+  {%- elif is_kube_master -%}
  http://127.0.0.1:{{ kube_apiserver_insecure_port }}
  {%- else -%}
  {%- if loadbalancer_apiserver is defined and loadbalancer_apiserver.port is defined -%}
@@ -83,5 +83,17 @@
- set_fact:
    peer_with_calico_rr: "{{ 'calico-rr' in groups and groups['calico-rr']|length > 0 }}"

+- name: check if atomic host
+  stat:
+    path: /run/ostree-booted
+  register: ostree
+
+- set_fact:
+    is_atomic: "{{ ostree.stat.exists }}"
+
+- set_fact:
+    kube_cert_group: "kube"
+  when: is_atomic

- include: set_resolv_facts.yml
  tags: [bootstrap-os, resolvconf, facts]

@@ -0,0 +1,21 @@
- name: check vsphere environment variables
  fail:
    msg: "{{ item.name }} is missing"
  when: item.value is not defined or item.value == ''
  with_items:
    - name: vsphere_vcenter_ip
      value: "{{ vsphere_vcenter_ip }}"
    - name: vsphere_vcenter_port
      value: "{{ vsphere_vcenter_port }}"
    - name: vsphere_user
      value: "{{ vsphere_user }}"
    - name: vsphere_password
      value: "{{ vsphere_password }}"
    - name: vsphere_datacenter
      value: "{{ vsphere_datacenter }}"
    - name: vsphere_datastore
      value: "{{ vsphere_datastore }}"
    - name: vsphere_working_dir
      value: "{{ vsphere_working_dir }}"
    - name: vsphere_insecure
      value: "{{ vsphere_insecure }}"

@@ -0,0 +1,20 @@
[Global]
datacenter = {{ vsphere_datacenter }}
datastore = {{ vsphere_datastore }}
insecure-flag = {{ vsphere_insecure }}
password = {{ vsphere_password }}
port = {{ vsphere_vcenter_port }}
server = {{ vsphere_vcenter_ip }}
user = {{ vsphere_user }}
working-dir = {{ vsphere_working_dir }}
{% if vsphere_vm_uuid is defined %}
vm-uuid = {{ vsphere_vm_uuid }}
{% endif %}

[Disk]
scsicontrollertype = {{ vsphere_scsi_controller_type }}

{% if vsphere_public_network is defined and vsphere_public_network != "" %}
[Network]
public-network = {{ vsphere_public_network }}
{% endif %}

@@ -1,16 +1,11 @@
---
- name: "Check_certs | check if the certs have already been generated on first master"
-  stat:
-    path: "{{ kube_cert_dir }}/{{ item }}"
+  find:
+    paths: "{{ kube_cert_dir }}"
+    patterns: "*.pem"
  delegate_to: "{{groups['kube-master'][0]}}"
  register: kubecert_master
  run_once: true
-  with_items: >-
-    ['ca.pem',
-    {% for host in groups['k8s-cluster'] %}
-    'node-{{ host }}-key.pem'
-    {% if not loop.last %}{{','}}{% endif %}
-    {% endfor %}]
- name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false" - name: "Check_certs | Set default value for 'sync_certs', 'gen_certs', and 'secret_changed' to false"
set_fact: set_fact:
@ -18,33 +13,53 @@
    gen_certs: false
    secret_changed: false

-- name: "Check_certs | Set 'gen_certs' to true"
-  set_fact:
-    gen_certs: true
-  when: "not {{ item.stat.exists }}"
-  run_once: true
-  with_items: "{{ kubecert_master.results }}"
-
-- name: "Check certs | check if a cert already exists"
+- name: "Check certs | check if a cert already exists on node"
  stat:
    path: "{{ kube_cert_dir }}/{{ item }}"
-  register: kubecert
+  register: kubecert_node
  with_items:
    - ca.pem
    - node-{{ inventory_hostname }}-key.pem

+- name: "Check_certs | Set 'gen_certs' to true"
+  set_fact:
+    gen_certs: true
+  when: "not item in kubecert_master.files|map(attribute='path') | list"
+  run_once: true
+  with_items: >-
+    ['{{ kube_cert_dir }}/ca.pem',
+    {% for host in groups['k8s-cluster'] %}
+    '{{ kube_cert_dir }}/node-{{ host }}-key.pem'
+    {% if not loop.last %}{{','}}{% endif %}
+    {% endfor %}]
+
+- name: "Check_certs | Set 'gen_node_certs' to true"
+  set_fact:
+    gen_node_certs: |-
+      {
+      {% set existing_certs = kubecert_master.files|map(attribute='path')|list|sort %}
+      {% for host in groups['k8s-cluster'] -%}
+      {% set host_cert = "%s/node-%s-key.pem"|format(kube_cert_dir, host) %}
+      {% if host_cert in existing_certs -%}
+      "{{ host }}": False,
+      {% else -%}
+      "{{ host }}": True,
+      {% endif -%}
+      {% endfor %}
+      }
+  run_once: true
- name: "Check_certs | Set 'sync_certs' to true" - name: "Check_certs | Set 'sync_certs' to true"
set_fact: set_fact:
sync_certs: true sync_certs: true
when: >- when: >-
{%- set certs = {'sync': False} -%} {%- set certs = {'sync': False} -%}
{%- for host in groups['k8s-cluster'] %} {% if gen_node_certs[inventory_hostname] or
{% if host == inventory_hostname %} (not kubecert_node.results[0].stat.exists|default(False)) or
{% if (not kubecert.results[0].stat.exists|default(False)) or (not kubecert_node.results[1].stat.exists|default(False)) or
(not kubecert.results[1].stat.exists|default(False)) or (kubecert_node.results[1].stat.checksum|default('') != kubecert_master.files|selectattr("path", "equalto", kubecert_node.results[1].stat.path)|first|map(attribute="checksum")|default('')) -%}
(kubecert.results[1].stat.checksum|default('') != kubecert_master.results[loop.index].stat.checksum|default('')) -%}
{%- set _ = certs.update({'sync': True}) -%} {%- set _ = certs.update({'sync': True}) -%}
{% endif %} {% endif %}
{% endif %}
{%- endfor -%}
{{ certs.sync }} {{ certs.sync }}

@@ -40,12 +40,12 @@
command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl.conf -d {{ kube_cert_dir }}" command: "{{ kube_script_dir }}/make-ssl.sh -f {{ kube_config_dir }}/openssl.conf -d {{ kube_cert_dir }}"
environment: environment:
- MASTERS: "{% for m in groups['kube-master'] %} - MASTERS: "{% for m in groups['kube-master'] %}
{% if hostvars[m].sync_certs|default(true) %} {% if gen_node_certs[m]|default(false) %}
{{ m }} {{ m }}
{% endif %} {% endif %}
{% endfor %}" {% endfor %}"
- HOSTS: "{% for h in groups['k8s-cluster'] %} - HOSTS: "{% for h in groups['k8s-cluster'] %}
{% if hostvars[h].sync_certs|default(true) %} {% if gen_node_certs[h]|default(true) %}
{{ h }} {{ h }}
{% endif %} {% endif %}
{% endfor %}" {% endfor %}"

@@ -71,8 +71,7 @@
  delegate_to: "{{groups['kube-master'][0]}}"
  when: gen_tokens|default(false)

-- include: gen_certs_script.yml
-  when: cert_management == "script"
+- include: "gen_certs_{{ cert_management }}.yml"
  tags: k8s-secrets

- include: sync_kube_master_certs.yml
@@ -83,9 +82,5 @@
when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster'] when: cert_management == "vault" and inventory_hostname in groups['k8s-cluster']
tags: k8s-secrets tags: k8s-secrets
- include: gen_certs_vault.yml
when: cert_management == "vault"
tags: k8s-secrets
- include: gen_tokens.yml - include: gen_tokens.yml
tags: k8s-secrets tags: k8s-secrets

@@ -12,8 +12,8 @@
## Bootstrap
- include: bootstrap/main.yml
-  when: vault_bootstrap | d()
+  when: cert_management == 'vault' and vault_bootstrap | d()

## Cluster
- include: cluster/main.yml
-  when: not vault_bootstrap | d()
+  when: cert_management == 'vault' and not vault_bootstrap | d()

@@ -39,17 +39,17 @@
    - { role: kargo-defaults, when: "cert_management == 'vault'" }
    - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }

-- hosts: etcd:!k8s-cluster
+- hosts: etcd
  any_errors_fatal: true
  roles:
    - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true }

- hosts: k8s-cluster
  any_errors_fatal: true
  roles:
    - { role: kargo-defaults}
-    - { role: etcd, tags: etcd }
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }

- hosts: etcd:k8s-cluster:vault
  any_errors_fatal: true