Merge pull request #591 from kubernetes-incubator/etcdtls

Add etcd TLS support
Matthew Mosesohn 2016-11-10 12:32:13 +03:00 committed by GitHub
commit cf5e8dc7d4
37 changed files with 457 additions and 409 deletions

View file

@ -5,10 +5,6 @@ The following components require highly available endpoints:
* etcd cluster,
* kube-apiserver service instances.
The former provides the
[etcd-proxy](https://coreos.com/etcd/docs/latest/proxy.html) service to access
the cluster members in HA fashion.
The latter relies on third-party reverse proxies, like Nginx or HAProxy, to
achieve the same goal.
@ -57,7 +53,7 @@ type. The following diagram shows how traffic to the apiserver is directed.
A user may opt to use an external loadbalancer (LB) instead. An external LB
provides access for external clients, while the internal LB accepts client
connections only to the localhost, similarly to the etcd-proxy HA endpoints.
connections only to the localhost.
Given a frontend `VIP` address and `IP1, IP2` addresses of backends, here is
an example configuration for a HAProxy service acting as an external LB:
```

View file

@ -62,7 +62,7 @@ ndots: 5
# Enable multiaccess to configure clients to access all of the etcd members directly
# as the "http://hostX:port, http://hostY:port, ..." and ignore the proxy loadbalancers.
# This may be the case if clients support and loadbalance multiple etcd servers natively.
etcd_multiaccess: false
etcd_multiaccess: true
# Assume there are no internal loadbalancers for apiservers and that they listen on
# kube_apiserver_port (default 443)

View file

@ -10,7 +10,7 @@ kube_version: v1.4.3
etcd_version: v3.0.6
#TODO(mattymo): Move calico versions to roles/network_plugins/calico/defaults
# after migration to container download
calico_version: v0.22.0
calico_version: v1.0.0-beta
calico_cni_version: v1.4.2
weave_version: v1.6.1
flannel_version: v0.6.2
@ -39,7 +39,8 @@ flannel_server_helper_image_tag: "{{ flannel_server_helper_version }}"
flannel_image_repo: "quay.io/coreos/flannel"
flannel_image_tag: "{{ flannel_version }}"
calicoctl_image_repo: "calico/ctl"
calicoctl_image_tag: "{{ calico_version }}"
# TODO(mattymo): v1.0.0-beta has different syntax. Needs work to upgrade
calicoctl_image_tag: "v0.22.0"
calico_node_image_repo: "calico/node"
calico_node_image_tag: "{{ calico_version }}"
calico_cni_image_repo: "calico/cni"

View file

@ -1,2 +1,8 @@
---
etcd_bin_dir: "{{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/"
etcd_config_dir: /etc/ssl/etcd
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"
etcd_cert_group: root
etcd_script_dir: "{{ bin_dir }}/etcd-scripts"

View file

@ -0,0 +1,80 @@
#!/bin/bash
# Author: Smana smainklh@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o pipefail
usage()
{
cat << EOF
Create self-signed certificates
Usage : $(basename $0) -f <config> [-d <ssldir>]
-h | --help : Show this message
-f | --config : Openssl configuration file
-d | --ssldir : Directory where the certificates will be installed
ex :
$(basename $0) -f openssl.conf -d /srv/ssl
EOF
}
# Options parsing
while (($#)); do
case "$1" in
-h | --help) usage; exit 0;;
-f | --config) CONFIG=${2}; shift 2;;
-d | --ssldir) SSLDIR="${2}"; shift 2;;
*)
usage
echo "ERROR : Unknown option"
exit 3
;;
esac
done
if [ -z ${CONFIG} ]; then
echo "ERROR: the openssl configuration file is missing. option -f"
exit 1
fi
if [ -z ${SSLDIR} ]; then
SSLDIR="/etc/ssl/etcd"
fi
tmpdir=$(mktemp -d /tmp/etcd_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
mkdir -p "${SSLDIR}"
# Root CA
openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1
openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=etcd-ca" > /dev/null 2>&1
# ETCD member
openssl genrsa -out member-key.pem 2048 > /dev/null 2>&1
openssl req -new -key member-key.pem -out member.csr -subj "/CN=etcd-member" -config ${CONFIG} > /dev/null 2>&1
openssl x509 -req -in member.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out member.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
# Nodes and Admin
for i in node admin; do
openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1
openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1
openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 -extensions ssl_client -extfile ${CONFIG} > /dev/null 2>&1
done
# Install certs
mv *.pem ${SSLDIR}/
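
The gen_certs tasks further down copy this script to the role's script dir and run it against the rendered openssl config. Invoked by hand with the role defaults it would look roughly like this (a sketch; `/etc/ssl/etcd` and `/etc/ssl/etcd/ssl` are `etcd_config_dir` and `etcd_cert_dir` from the defaults file above):

```bash
# Same arguments the Gen_certs task passes to the script.
bash make-ssl-etcd.sh -f /etc/ssl/etcd/openssl.conf -d /etc/ssl/etcd/ssl

# Files left in /etc/ssl/etcd/ssl afterwards:
#   ca.pem, ca-key.pem           - self-signed root CA
#   member.pem, member-key.pem   - etcd member (peer/server) certificate
#   node.pem, node-key.pem       - client certificate distributed to cluster nodes
#   admin.pem, admin-key.pem     - client certificate for administrative access
```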

View file

@ -6,21 +6,14 @@
- reload etcd
- wait for etcd up
- name: restart etcd-proxy
command: /bin/true
notify:
- etcd | reload systemd
- reload etcd-proxy
- wait for etcd up
- name: etcd | reload systemd
command: systemctl daemon-reload
when: ansible_service_mgr == "systemd"
- name: wait for etcd up
uri: url="http://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health"
uri: url="https://{% if is_etcd_master %}{{ etcd_address }}{% else %}127.0.0.1{% endif %}:2379/health" validate_certs=no
register: result
until: result.status == 200
until: result.status is defined and result.status == 200
retries: 10
delay: 5
@ -30,8 +23,7 @@
state: restarted
when: is_etcd_master
- name: reload etcd-proxy
service:
name: etcd-proxy
state: restarted
when: is_etcd_proxy
- name: set etcd_secret_changed
set_fact:
etcd_secret_changed: true
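
The `wait for etcd up` handler now probes the HTTPS endpoint with certificate validation disabled. A sketch of the equivalent manual check against a local member:

```bash
# -k mirrors validate_certs=no in the handler; once the CA is distributed,
# --cacert /etc/ssl/etcd/ssl/ca.pem can be used instead.
curl -k https://127.0.0.1:2379/health
# typically prints: {"health": "true"}
```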

View file

@ -0,0 +1,36 @@
---
- name: "Check_certs | check if the certs have already been generated on first master"
stat:
path: "{{ etcd_cert_dir }}/ca.pem"
delegate_to: "{{groups['etcd'][0]}}"
register: etcdcert_master
run_once: true
- name: "Check_certs | Set default value for 'sync_certs' and 'gen_certs' to false"
set_fact:
sync_certs: false
gen_certs: false
- name: "Check_certs | Set 'sync_certs' and 'gen_certs' to true"
set_fact:
gen_certs: true
when: not etcdcert_master.stat.exists
run_once: true
- name: "Check certs | check if a cert already exists"
stat:
path: "{{ etcd_cert_dir }}/ca.pem"
register: etcdcert
- name: "Check_certs | Set 'sync_certs' to true"
set_fact:
sync_certs: true
when: >-
{%- set certs = {'sync': False} -%}
{%- for server in play_hosts
if (not hostvars[server].etcdcert.stat.exists|default(False)) or
(hostvars[server].etcdcert.stat.checksum|default('') != etcdcert_master.stat.checksum|default('')) -%}
{%- set _ = certs.update({'sync': True}) -%}
{%- endfor -%}
{{ certs.sync }}
run_once: true
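
The final `when` expression is a Jinja loop rather than a plain boolean: it flips `sync_certs` to true if any host in the play is missing `ca.pem` or carries a checksum different from the first master's copy. Conceptually (a rough shell sketch, not part of the role; `$MASTER_SUM` stands for the checksum gathered on the first etcd master):

```bash
# Resync certs when this host has no ca.pem or it differs from the master's copy.
local_sum=$(sha1sum /etc/ssl/etcd/ssl/ca.pem 2>/dev/null | awk '{print $1}')
if [ -z "$local_sum" ] || [ "$local_sum" != "$MASTER_SUM" ]; then
  sync_certs=true
fi
```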

View file

@ -26,19 +26,3 @@
mode: 0755
when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_master
notify: restart etcd
- name: Configure | Copy etcd-proxy.service systemd file
template:
src: "etcd-proxy-{{ etcd_deployment_type }}.service.j2"
dest: /etc/systemd/system/etcd-proxy.service
backup: yes
when: ansible_service_mgr == "systemd" and is_etcd_proxy
notify: restart etcd-proxy
- name: Configure | Write etcd-proxy initd script
template:
src: "deb-etcd-proxy-{{ etcd_deployment_type }}.initd.j2"
dest: /etc/init.d/etcd-proxy
owner: root
mode: 0755
when: ansible_service_mgr in ["sysvinit","upstart"] and ansible_os_family == "Debian" and is_etcd_proxy
notify: restart etcd-proxy

View file

@ -0,0 +1,111 @@
---
- name: Gen_certs | create etcd script dir
file:
path: "{{ etcd_script_dir }}"
state: directory
owner: root
when: inventory_hostname == groups['etcd'][0]
- name: Gen_certs | create etcd cert dir
file:
path={{ etcd_cert_dir }}
group={{ etcd_cert_group }}
state=directory
owner=root
recurse=yes
- name: Gen_certs | write openssl config
template:
src: "openssl.conf.j2"
dest: "{{ etcd_config_dir }}/openssl.conf"
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
when: gen_certs|default(false)
- name: Gen_certs | copy certs generation script
copy:
src: "make-ssl-etcd.sh"
dest: "{{ etcd_script_dir }}/make-ssl-etcd.sh"
mode: 0700
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
when: gen_certs|default(false)
- name: Gen_certs | run cert generation script
command: "{{ etcd_script_dir }}/make-ssl-etcd.sh -f {{ etcd_config_dir }}/openssl.conf -d {{ etcd_cert_dir }}"
run_once: yes
delegate_to: "{{groups['etcd'][0]}}"
when: gen_certs|default(false)
notify: set etcd_secret_changed
- set_fact:
master_certs: ['ca-key.pem', 'admin.pem', 'admin-key.pem', 'member.pem', 'member-key.pem']
node_certs: ['ca.pem', 'node.pem', 'node-key.pem']
- name: Gen_certs | Gather etcd master certs
shell: "tar cfz - -C {{ etcd_cert_dir }} {{ master_certs|join(' ') }} {{ node_certs|join(' ') }}| base64 --wrap=0"
register: etcd_master_cert_data
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
when: sync_certs|default(false)
notify: set etcd_secret_changed
- name: Gen_certs | Gather etcd node certs
shell: "tar cfz - -C {{ etcd_cert_dir }} {{ node_certs|join(' ') }} | base64 --wrap=0"
register: etcd_node_cert_data
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
when: sync_certs|default(false)
notify: set etcd_secret_changed
- name: Gen_certs | Copy certs on masters
shell: "echo '{{etcd_master_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ etcd_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['etcd'] and sync_certs|default(false) and
inventory_hostname != groups['etcd'][0]
- name: Gen_certs | Copy certs on nodes
shell: "echo '{{etcd_node_cert_data.stdout|quote}}' | base64 -d | tar xz -C {{ etcd_cert_dir }}"
changed_when: false
when: inventory_hostname in groups['k8s-cluster'] and sync_certs|default(false) and
inventory_hostname not in groups['etcd']
- name: Gen_certs | check certificate permissions
file:
path={{ etcd_cert_dir }}
group={{ etcd_cert_group }}
state=directory
owner=kube
recurse=yes
- name: Gen_certs | set permissions on keys
shell: chmod 0600 {{ etcd_cert_dir}}/*key.pem
when: inventory_hostname in groups['etcd']
changed_when: false
- name: Gen_certs | target ca-certificates directory
set_fact:
ca_cert_dir: |-
{% if ansible_os_family == "Debian" -%}
/usr/local/share/ca-certificates
{%- elif ansible_os_family == "RedHat" -%}
/etc/pki/ca-trust/source/anchors
{%- elif ansible_os_family == "CoreOS" -%}
/etc/ssl/certs
{%- endif %}
- name: Gen_certs | add CA to trusted CA dir
copy:
src: "{{ etcd_cert_dir }}/ca.pem"
dest: "{{ ca_cert_dir }}/etcd-ca.crt"
remote_src: true
register: etcd_ca_cert
- name: Gen_certs | update ca-certificates (Debian/Ubuntu/CoreOS)
command: update-ca-certificates
when: etcd_ca_cert.changed and ansible_os_family in ["Debian", "CoreOS"]
- name: Gen_certs | update ca-certificates (RedHat)
command: update-ca-trust extract
when: etcd_ca_cert.changed and ansible_os_family == "RedHat"
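
Certificates are fanned out without fetch/copy round-trips: the first master archives them, the base64 text travels inside the registered variable, and each target unpacks it into the same directory. A minimal sketch of that transport, using the cert names defined in `master_certs`/`node_certs` above:

```bash
# On the first etcd master: pack the node certs and encode them as one line.
payload=$(tar cfz - -C /etc/ssl/etcd/ssl ca.pem node.pem node-key.pem | base64 --wrap=0)

# On a target node: decode and unpack into the cert directory.
echo "$payload" | base64 -d | tar xz -C /etc/ssl/etcd/ssl
```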

View file

@ -1,8 +1,15 @@
---
- include: pre_upgrade.yml
- include: check_certs.yml
- include: gen_certs.yml
- include: install.yml
when: is_etcd_master
- include: set_cluster_health.yml
when: is_etcd_master
- include: configure.yml
when: is_etcd_master
- include: refresh_config.yml
when: is_etcd_master
- name: Ensure etcd is running
service:
@ -11,23 +18,11 @@
enabled: yes
when: is_etcd_master
- name: Ensure etcd-proxy is running
service:
name: etcd-proxy
state: started
enabled: yes
when: is_etcd_proxy
- name: Restart etcd if binary changed
command: /bin/true
notify: restart etcd
when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_master
- name: Restart etcd-proxy if binary changed
command: /bin/true
notify: restart etcd-proxy
when: etcd_deployment_type == "host" and etcd_copy.stdout_lines and is_etcd_proxy
# Reload systemd before starting service
- meta: flush_handlers
@ -35,4 +30,6 @@
# initial state of the cluster is in `existing`
# state instead of `new`.
- include: set_cluster_health.yml
when: is_etcd_master
- include: refresh_config.yml
when: is_etcd_master

View file

@ -0,0 +1,34 @@
- name: "Pre-upgrade | check for etcd-proxy unit file"
stat:
path: /etc/systemd/system/etcd-proxy.service
register: kube_apiserver_service_file
- name: "Pre-upgrade | check for etcd-proxy init script"
stat:
path: /etc/init.d/etcd-proxy
register: kube_apiserver_init_script
- name: "Pre-upgrade | stop etcd-proxy if service defined"
service:
name: etcd-proxy
state: stopped
when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))
- name: "Pre-upgrade | remove etcd-proxy service definition"
file:
path: "{{ item }}"
state: absent
when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))
with_items:
- /etc/systemd/system/etcd-proxy.service
- /etc/init.d/etcd-proxy
- name: "Pre-upgrade | find etcd-proxy container"
command: docker ps -aq --filter "name=etcd-proxy*"
register: etcd_proxy_container
ignore_errors: true
- name: "Pre-upgrade | remove etcd-proxy if it exists"
command: "docker rm -f {{item}}"
with_items: "{{etcd_proxy_container.stdout_lines}}"
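
Outside Ansible, the same cleanup of a leftover etcd-proxy amounts to the following (a hedged sketch; the container and unit names match the removed templates):

```bash
# Stop and remove any etcd-proxy container left from a previous deployment.
docker rm -f $(docker ps -aq --filter "name=etcd-proxy*") 2>/dev/null || true

# Drop the old service definitions and pick up the change.
rm -f /etc/systemd/system/etcd-proxy.service /etc/init.d/etcd-proxy
systemctl daemon-reload
```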

View file

@ -5,10 +5,3 @@
dest: /etc/etcd.env
notify: restart etcd
when: is_etcd_master
- name: Refresh config | Create etcd-proxy config file
template:
src: etcd-proxy.j2
dest: /etc/etcd-proxy.env
notify: restart etcd-proxy
when: is_etcd_proxy

View file

@ -19,8 +19,9 @@ DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
DAEMON_EXEC=`basename $DAEMON`
DAEMON_ARGS="run --restart=always --env-file=/etc/etcd.env \
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /etc/ssl/certs:/etc/ssl/certs:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}

View file

@ -1,120 +0,0 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd-proxy
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd-proxy
# Description:
# etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin/:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ docker_bin_dir | default("/usr/bin") }}/docker
DAEMON_EXEC=`basename $DAEMON`
DAEMON_ARGS="run --restart=always --env-file=/etc/etcd-proxy.env \
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}"
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=root
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}
# Function that starts the daemon/service
#
do_start()
{
{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }} &>/dev/null || true
sleep 1
start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $DAEMON_EXEC
RETVAL="$?"
sleep 1
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd-proxy"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd-proxy is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd-proxy"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View file

@ -1,110 +0,0 @@
#!/bin/sh
set -a
### BEGIN INIT INFO
# Provides: etcd-proxy
# Required-Start: $local_fs $network $syslog
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: etcd-proxy
# Description:
# etcd-proxy is a proxy for etcd: distributed, consistent key-value store for shared configuration and service discovery
### END INIT INFO
PATH=/sbin:/usr/sbin:/bin:/usr/bin
DESC="etcd-proxy"
NAME=etcd-proxy
DAEMON={{ bin_dir }}/etcd
DAEMON_ARGS=""
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_USER=etcd
STOP_SCHEDULE="${STOP_SCHEDULE:-QUIT/5/TERM/5/KILL/5}"
PID=/var/run/etcd-proxy.pid
# Exit if the binary is not present
[ -x "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -f /etc/etcd-proxy.env ] && . /etc/etcd-proxy.env
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
do_status()
{
status_of_proc -p $PID "$DAEMON" "$NAME" && exit 0 || exit $?
}
# Function that starts the daemon/service
#
do_start()
{
start-stop-daemon --background --start --quiet --make-pidfile --pidfile $PID --user $DAEMON_USER --exec $DAEMON -- \
$DAEMON_ARGS \
|| return 2
}
#
# Function that stops the daemon/service
#
do_stop()
{
start-stop-daemon --stop --quiet --retry=$STOP_SCHEDULE --pidfile $PID --name $NAME
RETVAL="$?"
sleep 1
return "$RETVAL"
}
case "$1" in
start)
log_daemon_msg "Starting $DESC" "$NAME"
do_start
case "$?" in
0|1) log_end_msg 0 || exit 0 ;;
2) log_end_msg 1 || exit 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if do_stop; then
log_end_msg 0
else
log_failure_msg "Can't stop etcd-proxy"
log_end_msg 1
fi
;;
status)
if do_status; then
log_end_msg 0
else
log_failure_msg "etcd-proxy is not running"
log_end_msg 1
fi
;;
restart|force-reload)
log_daemon_msg "Restarting $DESC" "$NAME"
if do_stop; then
if do_start; then
log_end_msg 0
exit 0
else
rc="$?"
fi
else
rc="$?"
fi
log_failure_msg "Can't restart etcd-proxy"
log_end_msg ${rc}
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
exit 3
;;
esac

View file

@ -11,7 +11,8 @@ ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
-v /etc/ssl/certs:/etc/ssl/certs:ro \
-v {{ etcd_cert_dir }}:{{ etcd_cert_dir }}:ro \
-v /var/lib/etcd:/var/lib/etcd:rw \
--name={{ etcd_member_name | default("etcd") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \

View file

@ -1,28 +0,0 @@
[Unit]
Description=etcd-proxy docker wrapper
Wants=docker.socket
After=docker.service
[Service]
User=root
PermissionsStartOnly=true
ExecStart={{ docker_bin_dir | default("/usr/bin") }}/docker run --restart=always \
--env-file=/etc/etcd-proxy.env \
{# TODO(mattymo): Allow docker IP binding and disable in envfile
-p 2380:2380 -p 2379:2379 #}
--net=host \
--stop-signal=SIGKILL \
-v /usr/share/ca-certificates/:/etc/ssl/certs:ro \
--name={{ etcd_proxy_member_name | default("etcd-proxy") }} \
{{ etcd_image_repo }}:{{ etcd_image_tag }} \
{% if etcd_after_v3 %}
{{ etcd_container_bin_dir }}etcd
{% endif %}
ExecStartPre=-{{ docker_bin_dir | default("/usr/bin") }}/docker rm -f {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecReload={{ docker_bin_dir | default("/usr/bin") }}/docker restart {{ etcd_proxy_member_name | default("etcd-proxy") }}
ExecStop={{ docker_bin_dir | default("/usr/bin") }}/docker stop {{ etcd_proxy_member_name | default("etcd-proxy") }}
Restart=always
RestartSec=15s
[Install]
WantedBy=multi-user.target

View file

@ -1,19 +0,0 @@
[Unit]
Description=etcd-proxy
After=network.target
[Service]
Type=notify
User=etcd
PermissionsStartOnly=true
EnvironmentFile=/etc/etcd-proxy.env
ExecStart={{ bin_dir }}/etcd
ExecStartPre=/bin/mkdir -p /var/lib/etcd-proxy
ExecStartPre=/bin/chown -R etcd: /var/lib/etcd-proxy
NotifyAccess=all
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target

View file

@ -1,5 +0,0 @@
ETCD_DATA_DIR=/var/lib/etcd-proxy
ETCD_PROXY=on
ETCD_LISTEN_CLIENT_URLS={{ etcd_access_endpoint }}
ETCD_NAME={{ etcd_proxy_member_name | default("etcd-proxy") }}
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}

View file

@ -3,14 +3,19 @@ ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_url }}
ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_url }}
ETCD_INITIAL_CLUSTER_STATE={% if etcd_cluster_is_healthy.rc != 0 | bool %}new{% else %}existing{% endif %}
{% if not is_etcd_proxy %}
ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379,http://127.0.0.1:2379
{% else %}
ETCD_LISTEN_CLIENT_URLS=http://{{ etcd_address }}:2379
{% endif %}
ETCD_LISTEN_CLIENT_URLS=https://{{ etcd_address }}:2379,https://127.0.0.1:2379
ETCD_ELECTION_TIMEOUT=10000
ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd
ETCD_LISTEN_PEER_URLS=http://{{ etcd_address }}:2380
ETCD_LISTEN_PEER_URLS=https://{{ etcd_address }}:2380
ETCD_NAME={{ etcd_member_name }}
ETCD_PROXY=off
ETCD_INITIAL_CLUSTER={{ etcd_peer_addresses }}
# TLS settings
ETCD_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
ETCD_CERT_FILE={{ etcd_cert_dir }}/node.pem
ETCD_KEY_FILE={{ etcd_cert_dir }}/node-key.pem
ETCD_PEER_TRUSTED_CA_FILE={{ etcd_cert_dir }}/ca.pem
ETCD_PEER_CERT_FILE={{ etcd_cert_dir }}/member.pem
ETCD_PEER_KEY_FILE={{ etcd_cert_dir }}/member-key.pem
ETCD_PEER_CLIENT_CERT_AUTH=true
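
Rendered for a single member, this template produces an env file along these lines (a sketch only; the 10.0.0.1 address and `etcd1` name are placeholders, not values from the playbook, and a few variables such as the initial-cluster settings are omitted):

```bash
# Hypothetical rendering of /etc/etcd.env for one member.
ETCD_LISTEN_CLIENT_URLS=https://10.0.0.1:2379,https://127.0.0.1:2379
ETCD_LISTEN_PEER_URLS=https://10.0.0.1:2380
ETCD_NAME=etcd1
ETCD_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_CERT_FILE=/etc/ssl/etcd/ssl/node.pem
ETCD_KEY_FILE=/etc/ssl/etcd/ssl/node-key.pem
ETCD_PEER_TRUSTED_CA_FILE=/etc/ssl/etcd/ssl/ca.pem
ETCD_PEER_CERT_FILE=/etc/ssl/etcd/ssl/member.pem
ETCD_PEER_KEY_FILE=/etc/ssl/etcd/ssl/member-key.pem
ETCD_PEER_CLIENT_CERT_AUTH=true
```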

View file

@ -0,0 +1,39 @@
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[ ssl_client ]
extendedKeyUsage = clientAuth, serverAuth
basicConstraints = CA:FALSE
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
subjectAltName = @alt_names
[ v3_ca ]
basicConstraints = CA:TRUE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
authorityKeyIdentifier=keyid:always,issuer
[alt_names]
DNS.1 = localhost
{% for host in groups['etcd'] %}
DNS.{{ 1 + loop.index }} = {{ host }}
{% endfor %}
{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %}
{% set idx = groups['etcd'] | length | int + 1 %}
DNS.{{ idx | string }} = {{ apiserver_loadbalancer_domain_name }}
{% endif %}
{% for host in groups['etcd'] %}
IP.{{ 2 * loop.index - 1 }} = {{ hostvars[host]['access_ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
IP.{{ 2 * loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }}
{% endfor %}
{% set idx = groups['etcd'] | length | int * 2 + 1 %}
IP.{{ idx }} = 127.0.0.1
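
Because this SAN list is what etcd peers and clients validate, a quick way to confirm the rendered config did its job is to inspect the issued member certificate (a verification sketch; the path is the role's default cert dir):

```bash
# List the DNS and IP subject alternative names baked into the member cert.
openssl x509 -in /etc/ssl/etcd/ssl/member.pem -noout -text \
  | grep -A1 "Subject Alternative Name"
```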

View file

@ -26,7 +26,13 @@ spec:
image: calico/kube-policy-controller:latest
env:
- name: ETCD_ENDPOINTS
value: "{{ etcd_endpoint }}"
value: "{{ etcd_access_endpoint }}"
- name: ETCD_CA_CERT_FILE
value: "{{ etcd_cert_dir }}/ca.pem"
- name: ETCD_CERT_FILE
value: "{{ etcd_cert_dir }}/node.pem"
- name: ETCD_KEY_FILE
value: "{{ etcd_cert_dir }}/node-key.pem"
# Location of the Kubernetes API - this shouldn't need to be
# changed so long as it is used in conjunction with
# CONFIGURE_ETC_HOSTS="true".
@ -38,3 +44,12 @@ spec:
# This removes the need for KubeDNS to resolve the Service.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts:
- mountPath: {{ etcd_cert_dir }}
name: etcd-certs
readOnly: true
volumes:
- hostPath:
path: {{ etcd_cert_dir }}
name: etcd-certs

View file

@ -28,3 +28,9 @@ kube_apiserver_insecure_bind_address: 127.0.0.1
# Logging directory (sysvinit systems)
kube_log_dir: "/var/log/kubernetes"
# ETCD cert dir for connecting apiserver to etcd
etcd_config_dir: /etc/ssl/etcd
etcd_cert_dir: "{{ etcd_config_dir }}/ssl"

View file

@ -14,12 +14,3 @@
name: kube-apiserver
state: stopped
when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))
- name: "Pre-upgrade | remove kube-apiserver service definition"
file:
path: "{{ item }}"
state: absent
when: (kube_apiserver_service_file.stat.exists|default(False) or kube_apiserver_init_script.stat.exists|default(False))
with_items:
- /etc/systemd/system/kube-apiserver.service
- /etc/init.d/kube-apiserver

View file

@ -14,6 +14,9 @@ spec:
- --advertise-address={{ ip | default(ansible_default_ipv4.address) }}
- --etcd-servers={{ etcd_access_endpoint }}
- --etcd-quorum-read=true
- --etcd-cafile={{ etcd_cert_dir }}/ca.pem
- --etcd-certfile={{ etcd_cert_dir }}/node.pem
- --etcd-keyfile={{ etcd_cert_dir }}/node-key.pem
- --insecure-bind-address={{ kube_apiserver_insecure_bind_address }}
- --apiserver-count={{ kube_apiserver_count }}
- --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
@ -50,6 +53,9 @@ spec:
- mountPath: /etc/ssl/certs
name: ssl-certs-host
readOnly: true
- mountPath: {{ etcd_cert_dir }}
name: etcd-certs
readOnly: true
- mountPath: /var/log/
name: logfile
volumes:
@ -59,6 +65,9 @@ spec:
- hostPath:
path: /etc/ssl/certs/
name: ssl-certs-host
- hostPath:
path: {{ etcd_cert_dir }}
name: etcd-certs
- hostPath:
path: /var/log/
name: logfile

View file

@ -1,6 +1,10 @@
{
"name": "calico-k8s-network",
"type": "calico",
"etcd_endpoints": "{{ etcd_access_endpoint }}",
"etcd_cert_file": "{{ etcd_cert_dir }}/node.pem",
"etcd_key_file": "{{ etcd_cert_dir }}/node-key.pem",
"etcd_ca_cert_file": "{{ etcd_cert_dir }}/ca.pem",
"log_level": "info",
"ipam": {
"type": "calico-ipam"

View file

@ -45,3 +45,6 @@ openstack_username: "{{ lookup('env','OS_USERNAME') }}"
openstack_password: "{{ lookup('env','OS_PASSWORD') }}"
openstack_region: "{{ lookup('env','OS_REGION_NAME') }}"
openstack_tenant_id: "{{ lookup('env','OS_TENANT_ID') }}"
# All clients access each node individually, instead of using a load balancer.
etcd_multiaccess: true

View file

@ -23,14 +23,14 @@
- set_fact: etcd_address="{{ ip | default(ansible_default_ipv4['address']) }}"
- set_fact: etcd_access_address="{{ access_ip | default(etcd_address) }}"
- set_fact: etcd_peer_url="http://{{ etcd_access_address }}:2380"
- set_fact: etcd_client_url="http://{{ etcd_access_address }}:2379"
- set_fact: etcd_peer_url="https://{{ etcd_access_address }}:2380"
- set_fact: etcd_client_url="https://{{ etcd_access_address }}:2379"
- set_fact: etcd_authority="127.0.0.1:2379"
- set_fact: etcd_endpoint="http://{{ etcd_authority }}"
- set_fact: etcd_endpoint="https://{{ etcd_authority }}"
- set_fact:
etcd_access_addresses: |-
{% for item in groups['etcd'] -%}
http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2379{% if not loop.last %},{% endif %}
https://{{ item }}:2379{% if not loop.last %},{% endif %}
{%- endfor %}
- set_fact: etcd_access_endpoint="{% if etcd_multiaccess %}{{ etcd_access_addresses }}{% else %}{{ etcd_endpoint }}{% endif %}"
- set_fact:
@ -41,15 +41,8 @@
- set_fact:
etcd_peer_addresses: |-
{% for item in groups['etcd'] -%}
{{ "etcd"+loop.index|string }}=http://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
{{ "etcd"+loop.index|string }}=https://{{ hostvars[item].access_ip | default(hostvars[item].ip | default(hostvars[item].ansible_default_ipv4['address'])) }}:2380{% if not loop.last %},{% endif %}
{%- endfor %}
- set_fact:
etcd_proxy_member_name: |-
{% for host in groups['k8s-cluster'] %}
{% if inventory_hostname == host %}{{"etcd-proxy"+loop.index|string }}{% endif %}
{% endfor %}
- set_fact:
is_etcd_proxy: "{{ inventory_hostname in groups['k8s-cluster'] }}"
- set_fact:
is_etcd_master: "{{ inventory_hostname in groups['etcd'] }}"
- set_fact:
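
With `etcd_multiaccess: true`, the facts above expand to per-member HTTPS endpoints keyed by inventory hostname, which is why those hostnames must appear in the certificate SANs. For a hypothetical three-node `etcd` group the result is roughly (placeholder names and addresses only, shown as shell strings for illustration):

```bash
etcd_access_addresses="https://node1:2379,https://node2:2379,https://node3:2379"
etcd_peer_addresses="etcd1=https://10.0.0.1:2380,etcd2=https://10.0.0.2:2380,etcd3=https://10.0.0.3:2380"
etcd_access_endpoint="$etcd_access_addresses"   # because etcd_multiaccess is true
```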

View file

@ -8,3 +8,6 @@ ipip: false
# Set to true if you want your calico cni binaries to overwrite the
# ones from hyperkube while leaving other cni plugins intact.
overwrite_hyperkube_cni: true
calico_cert_dir: /etc/calico/certs
etcd_cert_dir: /etc/ssl/etcd/ssl

View file

@ -12,6 +12,24 @@
- meta: flush_handlers
- name: Calico | Create calico certs directory
file:
dest: "{{ calico_cert_dir }}"
state: directory
mode: 0750
owner: root
group: root
- name: Calico | Link etcd certificates for calico-node
file:
src: "{{ etcd_cert_dir }}/{{ item.s }}"
dest: "{{ calico_cert_dir }}/{{ item.d }}"
state: hard
with_items:
- {s: "ca.pem", d: "ca_cert.crt"}
- {s: "node.pem", d: "cert.crt"}
- {s: "node-key.pem", d: "key.pem"}
- name: Calico | Install calicoctl container script
template:
src: calicoctl-container.j2
@ -41,19 +59,23 @@
when: "{{ overwrite_hyperkube_cni|bool }}"
- name: Calico | wait for etcd
uri: url=http://localhost:2379/health
uri: url=https://localhost:2379/health validate_certs=no
register: result
until: result.status == 200
until: result.status == 200 or result.status == 401
retries: 10
delay: 5
when: inventory_hostname in groups['kube-master']
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
- name: Calico | Check if calico network pool has already been configured
uri:
url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
return_content: yes
status_code: 200,404
command: |-
curl \
--cacert {{ etcd_cert_dir }}/ca.pem \
--cert {{ etcd_cert_dir}}/admin.pem \
--key {{ etcd_cert_dir }}/admin-key.pem \
https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool
register: calico_conf
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
- name: Calico | Define ipip pool argument
@ -79,21 +101,29 @@
environment:
NO_DEFAULT_POOLS: true
run_once: true
when: calico_conf.status == 404 or "nodes" not in calico_conf.content
when: '"Key not found" in calico_conf.stdout or "nodes" not in calico_conf.stdout'
- name: Calico | Get calico configuration from etcd
uri:
url: "{{ etcd_endpoint }}/v2/keys/calico/v1/ipam/v4/pool"
return_content: yes
register: calico_pools
command: |-
curl \
--cacert {{ etcd_cert_dir }}/ca.pem \
--cert {{ etcd_cert_dir}}/admin.pem \
--key {{ etcd_cert_dir }}/admin-key.pem \
https://localhost:2379/v2/keys/calico/v1/ipam/v4/pool
register: calico_pools_raw
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
- set_fact:
calico_pools: "{{ calico_pools_raw.stdout | from_json }}"
run_once: true
- name: Calico | Check if calico pool is properly configured
fail:
msg: 'Only one network pool must be configured and it must be the subnet {{ kube_pods_subnet }}.
Please erase calico configuration and run the playbook again ("etcdctl rm --recursive /calico/v1/ipam/v4/pool")'
when: ( calico_pools.json['node']['nodes'] | length > 1 ) or
( not calico_pools.json['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") )
when: ( calico_pools['node']['nodes'] | length > 1 ) or
( not calico_pools['node']['nodes'][0]['key'] | search(".*{{ kube_pods_subnet | ipaddr('network') }}.*") )
run_once: true
- name: Calico | Write /etc/network-environment
@ -131,4 +161,3 @@
shell: "{{ bin_dir }}/calicoctl node bgp peer add {{ item.router_id }} as {{ item.as }}"
with_items: peers
when: peer_with_router|default(false) and inventory_hostname in groups['kube-node']
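
If the pool check above fails, the cleanup hinted at in the fail message needs the same TLS material as the curl probes. With etcdctl's v2 API that would look something like this (a sketch; cert paths are the role defaults):

```bash
# Wipe the calico pool key over TLS before re-running the playbook.
etcdctl --endpoints https://localhost:2379 \
  --ca-file /etc/ssl/etcd/ssl/ca.pem \
  --cert-file /etc/ssl/etcd/ssl/admin.pem \
  --key-file /etc/ssl/etcd/ssl/admin-key.pem \
  rm --recursive /calico/v1/ipam/v4/pool
```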

View file

@ -1,8 +1,8 @@
[Unit]
Description=Calico per-node agent
Documentation=https://github.com/projectcalico/calico-docker
After=docker.service docker.socket etcd-proxy.service
Wants=docker.socket etcd-proxy.service
After=docker.service docker.socket
Wants=docker.socket
[Service]
User=root

View file

@ -1,8 +1,13 @@
#!/bin/bash
/usr/bin/docker run --privileged --rm \
--net=host --pid=host -e ETCD_AUTHORITY={{ etcd_authority }} \
--net=host --pid=host \
-e ETCD_ENDPOINTS={{ etcd_access_endpoint }} \
-e ETCD_CA_CERT_FILE=/etc/calico/certs/ca_cert.crt \
-e ETCD_CERT_FILE=/etc/calico/certs/cert.crt \
-e ETCD_KEY_FILE=/etc/calico/certs/key.pem \
-v /usr/bin/docker:/usr/bin/docker \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/run/calico:/var/run/calico \
-v /etc/calico/certs:/etc/calico/certs:ro \
{{ calicoctl_image_repo }}:{{ calicoctl_image_tag}} \
$@
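
Assuming the wrapper is installed as `{{ bin_dir }}/calicoctl` (as the install task suggests), day-to-day use is unchanged; the TLS settings ride along in the container's environment. For example (`/usr/local/bin` stands in for `bin_dir`, and the peer IP/AS number are placeholders):

```bash
# Arguments pass straight through to calicoctl inside the container.
/usr/local/bin/calicoctl status
/usr/local/bin/calicoctl node bgp peer add 10.0.0.254 as 65000
```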

View file

@ -3,7 +3,10 @@
DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }}
# The Kubernetes master IP
KUBERNETES_MASTER={{ first_kube_master }}
KUBERNETES_MASTER={{ kube_apiserver_endpoint }}
# IP and port of etcd instance used by Calico
ETCD_AUTHORITY={{ etcd_authority }}
ETCD_ENDPOINTS={{ etcd_access_endpoint }}
ETCD_CA_CERT_FILE=/etc/calico/certs/ca_cert.crt
ETCD_CERT_FILE=/etc/calico/certs/cert.crt
ETCD_KEY_FILE=/etc/calico/certs/key.pem

View file

@ -1,9 +1,11 @@
---
- name: Flannel | Write flannel configuration
template:
src: network.json
dest: /etc/flannel-network.json
backup: yes
- name: Flannel | Set Flannel etcd configuration
command: |-
{{ bin_dir }}/etcdctl --peers={{ etcd_access_addresses }} \
set /{{ cluster_name }}/network/config \
'{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }'
delegate_to: "{{groups['etcd'][0]}}"
run_once: true
- name: Flannel | Create flannel pod manifest
template:

View file

@ -12,26 +12,16 @@
- name: "subnetenv"
hostPath:
path: "/run/flannel"
- name: "networkconfig"
- name: "etcd-certs"
hostPath:
path: "/etc/flannel-network.json"
path: "{{ etcd_cert_dir }}"
containers:
- name: "flannel-server-helper"
image: "{{ flannel_server_helper_image_repo }}:{{ flannel_server_helper_image_tag }}"
args:
- "--network-config=/etc/flannel-network.json"
- "--etcd-prefix=/{{ cluster_name }}/network"
- "--etcd-server={{ etcd_endpoint }}"
volumeMounts:
- name: "networkconfig"
mountPath: "/etc/flannel-network.json"
imagePullPolicy: "Always"
- name: "flannel-container"
image: "{{ flannel_image_repo }}:{{ flannel_image_tag }}"
command:
- "/bin/sh"
- "-c"
- "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}"
- "/opt/bin/flanneld -etcd-endpoints {{ etcd_access_endpoint }} -etcd-prefix /{{ cluster_name }}/network -etcd-cafile {{ etcd_cert_dir }}/ca.pem -etcd-certfile {{ etcd_cert_dir }}/node.pem -etcd-keyfile {{ etcd_cert_dir }}/node-key.pem {% if flannel_interface is defined %}-iface {{ flannel_interface }}{% endif %} {% if flannel_public_ip is defined %}-public-ip {{ flannel_public_ip }}{% endif %}"
ports:
- hostPort: 10253
containerPort: 10253
@ -41,6 +31,8 @@
volumeMounts:
- name: "subnetenv"
mountPath: "/run/flannel"
- name: "etcd-certs"
mountPath: "{{ etcd_cert_dir }}"
securityContext:
privileged: true
hostNetwork: true

View file

@ -1 +0,0 @@
{ "Network": "{{ kube_pods_subnet }}", "SubnetLen": {{ kube_network_node_prefix }}, "Backend": { "Type": "{{ flannel_backend_type }}" } }

View file

@ -5,7 +5,7 @@ local_release_dir: /tmp
kube_version: v1.4.3
etcd_version: v3.0.6
calico_version: v0.22.0
calico_version: v0.23.0
calico_cni_version: v1.4.2
weave_version: v1.6.1