From 3014dfef24effa7f137aab1238dd4b0ef0b50f22 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sun, 6 Dec 2015 20:39:03 +0100 Subject: [PATCH 01/34] Clustering etcd for ha masters --- roles/etcd/templates/etcd2.j2 | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/roles/etcd/templates/etcd2.j2 b/roles/etcd/templates/etcd2.j2 index 27143e458..cb3305287 100644 --- a/roles/etcd/templates/etcd2.j2 +++ b/roles/etcd/templates/etcd2.j2 @@ -1,17 +1,16 @@ # etcd2.0 +{% set etcd = {} %} +{% for srv in groups['kube-master'] %} +{% if inventory_hostname == srv %} +{% set _dummy = etcd.update({'name':"master"+loop.index|string}) %} +{% endif %} +{% endfor %} [Service] -{% if inventory_hostname in groups['kube-master'] %} -Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001" -Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380" -Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380" +Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379" +Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380" +Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['kube-master'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not loop.last %},{% endif %}{% endfor %}" Environment="ETCD_INITIAL_CLUSTER_STATE=new" Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd" -Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001" -Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001" -Environment="ETCD_NAME=master" -{% else %} -Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001" -Environment="ETCD_INITIAL_CLUSTER=master=http://{{ 
groups['kube-master'][0] }}:2380" -Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001" -Environment="ETCD_PROXY=on" -{% endif %} +Environment="ETCD_LISTEN_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379" +Environment="ETCD_LISTEN_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380" +Environment="ETCD_NAME={{ etcd.name }}" From d1e19563b0a4f889f4bbc572acfd040d7b2b3d9d Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:32:13 +0100 Subject: [PATCH 02/34] Master and nodes will run the 'node' role, kube-proxy is run under a container, new script for ssl certs --- roles/kubernetes/common/files/make-ca-cert.sh | 115 ------------------ roles/kubernetes/common/meta/main.yml | 3 - roles/kubernetes/common/tasks/gen_certs.yml | 42 ------- roles/kubernetes/common/tasks/main.yml | 29 ----- .../{common => node}/defaults/main.yml | 8 +- .../{common => node}/files/kube-gen-token.sh | 0 roles/kubernetes/node/files/make-ssl.sh | 107 ++++++++++++++++ roles/kubernetes/node/handlers/main.yml | 12 -- roles/kubernetes/node/meta/main.yml | 3 - roles/kubernetes/node/tasks/config.yml | 53 -------- roles/kubernetes/node/tasks/gen_certs.yml | 28 +++++ .../{common => node}/tasks/gen_tokens.yml | 10 +- roles/kubernetes/node/tasks/install.yml | 17 +-- roles/kubernetes/node/tasks/main.yml | 46 ++++++- .../{common => node}/tasks/secrets.yml | 50 +++++--- .../kubernetes/node/tasks/temp_workaround.yml | 7 +- .../{common => node}/templates/config.j2 | 4 +- roles/kubernetes/node/templates/kubelet.j2 | 14 ++- .../node/templates/kubelet.kubeconfig.j2 | 18 --- .../{systemd-init => }/kubelet.service.j2 | 1 + .../manifests/kube-proxy.manifest.j2 | 42 +++++++ .../node/templates/node-kubeconfig.yaml.j2 | 17 +++ .../kubernetes/node/templates/openssl.conf.j2 | 21 ++++ roles/kubernetes/node/templates/proxy.j2 | 6 - 
.../node/templates/proxy.kubeconfig.j2 | 18 --- .../systemd-init/kube-proxy.service.j2 | 22 ---- 26 files changed, 319 insertions(+), 374 deletions(-) delete mode 100755 roles/kubernetes/common/files/make-ca-cert.sh delete mode 100644 roles/kubernetes/common/meta/main.yml delete mode 100644 roles/kubernetes/common/tasks/gen_certs.yml delete mode 100644 roles/kubernetes/common/tasks/main.yml rename roles/kubernetes/{common => node}/defaults/main.yml (88%) rename roles/kubernetes/{common => node}/files/kube-gen-token.sh (100%) create mode 100644 roles/kubernetes/node/files/make-ssl.sh delete mode 100644 roles/kubernetes/node/meta/main.yml delete mode 100644 roles/kubernetes/node/tasks/config.yml create mode 100644 roles/kubernetes/node/tasks/gen_certs.yml rename roles/kubernetes/{common => node}/tasks/gen_tokens.yml (74%) rename roles/kubernetes/{common => node}/tasks/secrets.yml (54%) rename roles/kubernetes/{common => node}/templates/config.j2 (89%) delete mode 100644 roles/kubernetes/node/templates/kubelet.kubeconfig.j2 rename roles/kubernetes/node/templates/{systemd-init => }/kubelet.service.j2 (95%) create mode 100644 roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 create mode 100644 roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 create mode 100644 roles/kubernetes/node/templates/openssl.conf.j2 delete mode 100644 roles/kubernetes/node/templates/proxy.j2 delete mode 100644 roles/kubernetes/node/templates/proxy.kubeconfig.j2 delete mode 100644 roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 diff --git a/roles/kubernetes/common/files/make-ca-cert.sh b/roles/kubernetes/common/files/make-ca-cert.sh deleted file mode 100755 index 3950eec91..000000000 --- a/roles/kubernetes/common/files/make-ca-cert.sh +++ /dev/null @@ -1,115 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -set -o nounset -set -o pipefail - -# Caller should set in the ev: -# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_" -# DNS_DOMAIN - which will be passed to minions in --cluster_domain -# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated -# MASTER_NAME - I'm not sure what it is... - -# Also the following will be respected -# CERT_DIR - where to place the finished certs -# CERT_GROUP - who the group owner of the cert files should be - -cert_ip="${MASTER_IP:="${1}"}" -master_name="${MASTER_NAME:="kubernetes"}" -service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}" -dns_domain="${DNS_DOMAIN:="cluster.local"}" -cert_dir="${CERT_DIR:-"/srv/kubernetes"}" -cert_group="${CERT_GROUP:="kube-cert"}" - -# The following certificate pairs are created: -# -# - ca (the cluster's certificate authority) -# - server -# - kubelet -# - kubecfg (for kubectl) -# -# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate -# the certs that we need. - -# TODO: Add support for discovery on other providers? 
-if [ "$cert_ip" == "_use_gce_external_ip_" ]; then - cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) -fi - -if [ "$cert_ip" == "_use_aws_external_ip_" ]; then - cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) -fi - -if [ "$cert_ip" == "_use_azure_dns_name_" ]; then - cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net -fi - -tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX) -trap 'rm -rf "${tmpdir}"' EXIT -cd "${tmpdir}" - -# TODO: For now, this is a patched tool that makes subject-alt-name work, when -# the fix is upstream move back to the upstream easyrsa. This is cached in GCS -# but is originally taken from: -# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz -# -# To update, do the following: -# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz -# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz -# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz -# -# Due to GCS caching of public objects, it may take time for this to be widely -# distributed. 
- -# Calculate the first ip address in the service range -octects=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g')) -((octects[3]+=1)) -service_ip=$(echo "${octects[*]}" | sed 's/ /./g') - -# Determine appropriete subject alt names -sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}" - -curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1 -tar xzf easy-rsa.tar.gz > /dev/null -cd easy-rsa-master/easyrsa3 - -(./easyrsa init-pki > /dev/null 2>&1 - ./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1 - ./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1 - ./easyrsa build-client-full kubelet nopass > /dev/null 2>&1 - ./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || { - # If there was an error in the subshell, just die. - # TODO(roberthbailey): add better error handling here - echo "=== Failed to generate certificates: Aborting ===" - exit 2 - } - -mkdir -p "$cert_dir" - -cp -p pki/ca.crt "${cert_dir}/ca.crt" -cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1 -cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1 -cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt" -cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key" -cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt" -cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key" - -CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt") -for cert in "${CERTS[@]}"; do - chgrp "${cert_group}" "${cert_dir}/${cert}" - chmod 660 "${cert_dir}/${cert}" -done diff --git a/roles/kubernetes/common/meta/main.yml b/roles/kubernetes/common/meta/main.yml deleted file mode 100644 index 87756afe1..000000000 --- a/roles/kubernetes/common/meta/main.yml +++ /dev/null @@ -1,3 
+0,0 @@ ---- -dependencies: - - { role: etcd } diff --git a/roles/kubernetes/common/tasks/gen_certs.yml b/roles/kubernetes/common/tasks/gen_certs.yml deleted file mode 100644 index 74fd4458c..000000000 --- a/roles/kubernetes/common/tasks/gen_certs.yml +++ /dev/null @@ -1,42 +0,0 @@ ---- -#- name: Get create ca cert script from Kubernetes -# get_url: -# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh -# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500 -# force=yes - -- name: certs | install cert generation script - copy: - src=make-ca-cert.sh - dest={{ kube_script_dir }} - mode=0500 - changed_when: false - -# FIXME This only generates a cert for one master... -- name: certs | run cert generation script - command: - "{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}" - args: - creates: "{{ kube_cert_dir }}/server.crt" - environment: - MASTER_IP: "{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}" - MASTER_NAME: "{{ inventory_hostname }}" - DNS_DOMAIN: "{{ dns_domain }}" - SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}" - CERT_DIR: "{{ kube_cert_dir }}" - CERT_GROUP: "{{ kube_cert_group }}" - -- name: certs | check certificate permissions - file: - path={{ item }} - group={{ kube_cert_group }} - owner=kube - mode=0440 - with_items: - - "{{ kube_cert_dir }}/ca.crt" - - "{{ kube_cert_dir }}/server.crt" - - "{{ kube_cert_dir }}/server.key" - - "{{ kube_cert_dir }}/kubecfg.crt" - - "{{ kube_cert_dir }}/kubecfg.key" - - "{{ kube_cert_dir }}/kubelet.crt" - - "{{ kube_cert_dir }}/kubelet.key" diff --git a/roles/kubernetes/common/tasks/main.yml b/roles/kubernetes/common/tasks/main.yml deleted file mode 100644 index 76d3bbc80..000000000 --- a/roles/kubernetes/common/tasks/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: define alias command for kubectl all - lineinfile: - dest=/etc/bash.bashrc - 
line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'" - regexp='^alias kball=.*$' - state=present - insertafter=EOF - create=True - -- name: create kubernetes config directory - file: path={{ kube_config_dir }} state=directory - -- name: create kubernetes script directory - file: path={{ kube_script_dir }} state=directory - -- name: Make sure manifest directory exists - file: path={{ kube_manifest_dir }} state=directory - -- name: write the global config file - template: - src: config.j2 - dest: "{{ kube_config_dir }}/config" - notify: - - restart daemons - -- include: secrets.yml - tags: - - secrets diff --git a/roles/kubernetes/common/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml similarity index 88% rename from roles/kubernetes/common/defaults/main.yml rename to roles/kubernetes/node/defaults/main.yml index 69d619ae0..d48d72b66 100644 --- a/roles/kubernetes/common/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -12,7 +12,7 @@ kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" kube_config_dir: /etc/kubernetes # This is where all the cert scripts and certs will be located -kube_cert_dir: "{{ kube_config_dir }}/certs" +kube_cert_dir: "{{ kube_config_dir }}/ssl" # This is where all of the bearer tokens will be stored kube_token_dir: "{{ kube_config_dir }}/tokens" @@ -32,12 +32,16 @@ dns_domain: "{{ cluster_name }}" kube_proxy_mode: userspace +hyperkube_image: + name: gcr.io/google_containers/hyperkube + tag: v1.1.2 + # IP address of the DNS server. # Kubernetes will create a pod with several containers, serving as the DNS # server and expose it under this IP address. The IP address must be from # the range specified as kube_service_addresses. This magic will actually # pick the 10th ip address in the kube_service_addresses range and use that. 
-# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" +dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" # kube_api_runtime_config: # - extensions/v1beta1/daemonsets=true diff --git a/roles/kubernetes/common/files/kube-gen-token.sh b/roles/kubernetes/node/files/kube-gen-token.sh similarity index 100% rename from roles/kubernetes/common/files/kube-gen-token.sh rename to roles/kubernetes/node/files/kube-gen-token.sh diff --git a/roles/kubernetes/node/files/make-ssl.sh b/roles/kubernetes/node/files/make-ssl.sh new file mode 100644 index 000000000..9ab0a49df --- /dev/null +++ b/roles/kubernetes/node/files/make-ssl.sh @@ -0,0 +1,107 @@ +#!/bin/bash + +# Author: skahlouc@skahlouc-laptop +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +set -o errexit +set -o pipefail + +usage() +{ + cat << EOF +Create self signed certificates + +Usage : $(basename $0) -f [-c ] [-d ] [-g ] + -h | --help : Show this message + -f | --config : Openssl configuration file + -c | --cloud : Cloud provider (GCE, AWS or AZURE) + -d | --ssldir : Directory where the certificates will be installed + -g | --sslgrp : Group of the certificates + + ex : + $(basename $0) -f openssl.conf -c GCE -d /srv/ssl -g kube +EOF +} + +# Options parsing +while (($#)); do + case "$1" in + -h | --help) usage; exit 0;; + -f | --config) CONFIG=${2}; shift 2;; + -c | --cloud) CLOUD=${2}; shift 2;; + -d | --ssldir) SSLDIR="${2}"; shift 2;; + -g | --group) SSLGRP="${2}"; shift 2;; + *) + usage + echo "ERROR : Unknown option" + exit 3 + ;; + esac +done + +if [ -z ${CONFIG} ]; then + echo "ERROR: the openssl configuration file is missing. option -f" + exit 1 +fi +if [ -z ${SSLDIR} ]; then + SSLDIR="/etc/kubernetes/certs" +fi +if [ -z ${SSLGRP} ]; then + SSLGRP="kube-cert" +fi + +#echo "config=$CONFIG, cloud=$CLOUD, certdir=$SSLDIR, certgroup=$SSLGRP" + +SUPPORTED_CLOUDS="GCE AWS AZURE" + +# TODO: Add support for discovery on other providers? +if [ "${CLOUD}" == "GCE" ]; then + CLOUD_IP=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip) +fi + +if [ "${CLOUD}" == "AWS" ]; then + CLOUD_IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) +fi + +if [ "${CLOUD}" == "AZURE" ]; then + CLOUD_IP=$(uname -n | awk -F. 
'{ print $2 }').cloudapp.net +fi + +tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX) +trap 'rm -rf "${tmpdir}"' EXIT +cd "${tmpdir}" + +mkdir -p "${SSLDIR}" + +# Root CA +openssl genrsa -out ca-key.pem 2048 > /dev/null 2>&1 +openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca" > /dev/null 2>&1 + +# Apiserver +openssl genrsa -out apiserver-key.pem 2048 > /dev/null 2>&1 +openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config ${CONFIG} > /dev/null 2>&1 +openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 365 -extensions v3_req -extfile ${CONFIG} > /dev/null 2>&1 + +# Nodes and Admin +for i in node admin; do + openssl genrsa -out ${i}-key.pem 2048 > /dev/null 2>&1 + openssl req -new -key ${i}-key.pem -out ${i}.csr -subj "/CN=kube-${i}" > /dev/null 2>&1 + openssl x509 -req -in ${i}.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out ${i}.pem -days 365 > /dev/null 2>&1 +done + +# Install certs +mv *.pem ${SSLDIR}/ +chgrp ${SSLGRP} ${SSLDIR}/* +chmod 600 ${SSLDIR}/*-key.pem +chown root:root ${SSLDIR}/*-key.pem diff --git a/roles/kubernetes/node/handlers/main.yml b/roles/kubernetes/node/handlers/main.yml index 9abb8ff25..162c4cde1 100644 --- a/roles/kubernetes/node/handlers/main.yml +++ b/roles/kubernetes/node/handlers/main.yml @@ -4,7 +4,6 @@ notify: - reload systemd - restart reloaded-kubelet - - restart reloaded-proxy - name: reload systemd command: systemctl daemon-reload @@ -19,14 +18,3 @@ service: name: kubelet state: restarted - -- name: restart proxy - command: /bin/true - notify: - - reload systemd - - restart reloaded-proxy - -- name: restart reloaded-proxy - service: - name: kube-proxy - state: restarted diff --git a/roles/kubernetes/node/meta/main.yml b/roles/kubernetes/node/meta/main.yml deleted file mode 100644 index 31675692c..000000000 --- a/roles/kubernetes/node/meta/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- 
-dependencies: - - { role: kubernetes/common } diff --git a/roles/kubernetes/node/tasks/config.yml b/roles/kubernetes/node/tasks/config.yml deleted file mode 100644 index c1d5f29b2..000000000 --- a/roles/kubernetes/node/tasks/config.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -- name: Get the node token values - slurp: - src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token" - with_items: - - "system:kubelet" - - "system:proxy" - register: tokens - run_once: true - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: Set token facts - set_fact: - kubelet_token: "{{ tokens.results[0].content|b64decode }}" - proxy_token: "{{ tokens.results[1].content|b64decode }}" - -- name: Create kubelet environment vars dir - file: path=/etc/systemd/system/kubelet.service.d state=directory - -- name: Write kubelet config file - template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes - notify: - - restart kubelet - -- name: write the kubecfg (auth) file for kubelet - template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig backup=yes - notify: - - restart kubelet - -- name: Create proxy environment vars dir - file: path=/etc/systemd/system/kube-proxy.service.d state=directory - -- name: Write proxy config file - template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes - notify: - - restart proxy - -- name: write the kubecfg (auth) file for kube-proxy - template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes - notify: - - restart proxy - -- name: Enable kubelet - service: - name: kubelet - enabled: yes - state: started - -- name: Enable proxy - service: - name: kube-proxy - enabled: yes - state: started diff --git a/roles/kubernetes/node/tasks/gen_certs.yml b/roles/kubernetes/node/tasks/gen_certs.yml new file mode 100644 index 000000000..a4f70ce54 --- /dev/null +++ b/roles/kubernetes/node/tasks/gen_certs.yml @@ -0,0 +1,28 @@ 
+--- +- name: certs | install cert generation script + copy: + src=make-ssl.sh + dest={{ kube_script_dir }} + mode=0500 + changed_when: false + +- name: certs | write openssl config + template: + src: "openssl.conf.j2" + dest: "{{ kube_config_dir }}/.openssl.conf" + +- name: certs | run cert generation script + shell: > + {{ kube_script_dir }}/make-ssl.sh + -f {{ kube_config_dir }}/.openssl.conf + -g {{ kube_cert_group }} + -d {{ kube_cert_dir }} + args: + creates: "{{ kube_cert_dir }}/apiserver.pem" + +- name: certs | check certificate permissions + file: + path={{ kube_cert_dir }} + group={{ kube_cert_group }} + owner=kube + recurse=yes diff --git a/roles/kubernetes/common/tasks/gen_tokens.yml b/roles/kubernetes/node/tasks/gen_tokens.yml similarity index 74% rename from roles/kubernetes/common/tasks/gen_tokens.yml rename to roles/kubernetes/node/tasks/gen_tokens.yml index cf77d4399..f2e5625f9 100644 --- a/roles/kubernetes/common/tasks/gen_tokens.yml +++ b/roles/kubernetes/node/tasks/gen_tokens.yml @@ -10,21 +10,17 @@ environment: TOKEN_DIR: "{{ kube_token_dir }}" with_nested: - - [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ] - - "{{ groups['kube-master'][0] }}" + - [ "system:kubectl" ] + - "{{ groups['kube-master'] }}" register: gentoken changed_when: "'Added' in gentoken.stdout" - notify: - - restart daemons - name: tokens | generate tokens for node components command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}" environment: TOKEN_DIR: "{{ kube_token_dir }}" with_nested: - - [ 'system:kubelet', 'system:proxy' ] + - [ 'system:kubelet' ] - "{{ groups['kube-node'] }}" register: gentoken changed_when: "'Added' in gentoken.stdout" - notify: - - restart daemons diff --git a/roles/kubernetes/node/tasks/install.yml b/roles/kubernetes/node/tasks/install.yml index 0772393ff..e1f45460a 100644 --- a/roles/kubernetes/node/tasks/install.yml +++ b/roles/kubernetes/node/tasks/install.yml @@ -1,20 +1,13 @@ --- -- 
name: Write kube-proxy systemd init file - template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes - notify: restart daemons - - name: Write kubelet systemd init file - template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes - notify: restart daemons + template: src=kubelet.service.j2 dest=/etc/systemd/system/kubelet.service backup=yes + notify: restart kubelet -- name: Install kubernetes binaries +- name: Install kubelet binary copy: - src={{ local_release_dir }}/kubernetes/bin/{{ item }} + src={{ local_release_dir }}/kubernetes/bin/kubelet dest={{ bin_dir }} owner=kube mode=u+x - with_items: - - kube-proxy - - kubelet notify: - - restart daemons + - restart kubelet diff --git a/roles/kubernetes/node/tasks/main.yml b/roles/kubernetes/node/tasks/main.yml index e0efbaf73..7b5e29da9 100644 --- a/roles/kubernetes/node/tasks/main.yml +++ b/roles/kubernetes/node/tasks/main.yml @@ -1,4 +1,48 @@ --- +- name: create kubernetes config directory + file: path={{ kube_config_dir }} state=directory + +- name: create kubernetes script directory + file: path={{ kube_script_dir }} state=directory + +- name: Make sure manifest directory exists + file: path={{ kube_manifest_dir }} state=directory + +- include: secrets.yml + tags: + - secrets + - include: install.yml -- include: config.yml + +- name: write the global config file + template: + src: config.j2 + dest: "{{ kube_config_dir }}/config" + notify: + - restart kubelet + +- name: Create kubelet environment vars dir + file: path=/etc/systemd/system/kubelet.service.d state=directory + +- name: Write kubelet config file + template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf backup=yes + notify: + - restart kubelet + +- name: write the kubecfg (auth) file for kubelet + template: src=node-kubeconfig.yaml.j2 dest={{ kube_config_dir }}/node-kubeconfig.yaml backup=yes + notify: + - restart kubelet + +- name: Write 
proxy manifest + template: + src: manifests/kube-proxy.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-proxy.manifest" + +- name: Enable kubelet + service: + name: kubelet + enabled: yes + state: started + - include: temp_workaround.yml diff --git a/roles/kubernetes/common/tasks/secrets.yml b/roles/kubernetes/node/tasks/secrets.yml similarity index 54% rename from roles/kubernetes/common/tasks/secrets.yml rename to roles/kubernetes/node/tasks/secrets.yml index c61e17d9b..1fdb99f98 100644 --- a/roles/kubernetes/common/tasks/secrets.yml +++ b/roles/kubernetes/node/tasks/secrets.yml @@ -29,26 +29,36 @@ run_once: true when: inventory_hostname == groups['kube-master'][0] -- name: Read back the CA certificate - slurp: - src: "{{ kube_cert_dir }}/ca.crt" - register: ca_cert - run_once: true - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: certs | register the CA certificate as a fact for later use - set_fact: - kube_ca_cert: "{{ ca_cert.content|b64decode }}" - -- name: certs | write CA certificate everywhere - copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt" - notify: - - restart daemons - -- debug: msg="{{groups['kube-master'][0]}} == {{inventory_hostname}}" - tags: - - debug - - include: gen_tokens.yml run_once: true when: inventory_hostname == groups['kube-master'][0] + +# Sync certs between nodes +- user: + name: '{{ansible_user_id}}' + generate_ssh_key: yes + delegate_to: "{{ groups['kube-master'][0] }}" + run_once: yes + +- name: 'get ssh keypair' + slurp: path=~/.ssh/id_rsa.pub + register: public_key + delegate_to: "{{ groups['kube-master'][0] }}" + +- name: 'setup keypair on nodes' + authorized_key: + user: '{{ansible_user_id}}' + key: "{{public_key.content|b64decode }}" + +- name: synchronize certificates for nodes + synchronize: + src: "{{ item }}" + dest: "{{ kube_cert_dir }}" + recursive: yes + delete: yes + rsync_opts: [ '--one-file-system'] + with_items: + - "{{ kube_cert_dir}}/ca.pem" + - "{{ kube_cert_dir}}/node.pem" + - "{{ 
kube_cert_dir}}/node-key.pem" + delegate_to: "{{ groups['kube-master'][0] }}" diff --git a/roles/kubernetes/node/tasks/temp_workaround.yml b/roles/kubernetes/node/tasks/temp_workaround.yml index 8dcefe5e8..a6ef09f4d 100644 --- a/roles/kubernetes/node/tasks/temp_workaround.yml +++ b/roles/kubernetes/node/tasks/temp_workaround.yml @@ -1,5 +1,2 @@ -- name: Warning Temporary workaround !!! Disable kubelet and kube-proxy on node startup - service: name={{ item }} enabled=no - with_items: - - kubelet - - kube-proxy +- name: Warning Temporary workaround !!! Disable kubelet on node startup + service: name=kubelet enabled=no diff --git a/roles/kubernetes/common/templates/config.j2 b/roles/kubernetes/node/templates/config.j2 similarity index 89% rename from roles/kubernetes/common/templates/config.j2 rename to roles/kubernetes/node/templates/config.j2 index 526160a7b..f68dffb3d 100644 --- a/roles/kubernetes/common/templates/config.j2 +++ b/roles/kubernetes/node/templates/config.j2 @@ -17,10 +17,10 @@ KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug -KUBE_LOG_LEVEL="--v=5" +KUBE_LOG_LEVEL="{{ kube_log_level | default('--v=2') }}" # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow_privileged=true" # How the replication controller, scheduler, and proxy -KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}" +KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }}" diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 0a516b5cc..b062a055a 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -1,18 +1,24 @@ [Service] Environment="KUBE_LOGTOSTDERR=--logtostderr=true" -Environment="KUBE_LOG_LEVEL=--v=0" +Environment="KUBE_LOG_LEVEL={{ kube_log_level | default('--v=2') }}" Environment="KUBE_ALLOW_PRIV=--allow_privileged=true" 
-Environment="KUBE_MASTER=--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}" +{% if inventory_hostname in groups['kube-master'] %} +Environment="KUBELET_API_SERVER=--api_servers=http://{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:{{ kube_apiserver_insecure_port }}" +{% else %} +Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_apiserver_port }}" +{% endif %} # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) Environment="KUBELET_ADDRESS=--address=0.0.0.0" # The port for the info server to serve on # Environment="KUBELET_PORT=--port=10250" # You may leave this blank to use the actual hostname Environment="KUBELET_HOSTNAME=--hostname_override={{ inventory_hostname }}" +{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %} +Environment="KUBELET_REGISTER_NODE=--register-node=false" +{% endif %} # location of the api-server -Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_master_port }}" {% if dns_setup %} -Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}" +Environment="KUBELET_ARGS=--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --config={{ kube_manifest_dir }}" {% else %} Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}" {% endif %} diff --git a/roles/kubernetes/node/templates/kubelet.kubeconfig.j2 b/roles/kubernetes/node/templates/kubelet.kubeconfig.j2 deleted file mode 100644 index 28eda1e03..000000000 --- a/roles/kubernetes/node/templates/kubelet.kubeconfig.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -current-context: 
kubelet-to-{{ cluster_name }} -preferences: {} -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: https://{{ groups['kube-master'][0] }}:{{kube_master_port}} - name: {{ cluster_name }} -contexts: -- context: - cluster: {{ cluster_name }} - user: kubelet - name: kubelet-to-{{ cluster_name }} -users: -- name: kubelet - user: - token: {{ kubelet_token }} diff --git a/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2 b/roles/kubernetes/node/templates/kubelet.service.j2 similarity index 95% rename from roles/kubernetes/node/templates/systemd-init/kubelet.service.j2 rename to roles/kubernetes/node/templates/kubelet.service.j2 index 338b4b23c..c09ff795d 100644 --- a/roles/kubernetes/node/templates/systemd-init/kubelet.service.j2 +++ b/roles/kubernetes/node/templates/kubelet.service.j2 @@ -19,6 +19,7 @@ ExecStart={{ bin_dir }}/kubelet \ $KUBELET_HOSTNAME \ $KUBE_ALLOW_PRIV \ $KUBELET_ARGS \ + $KUBELET_REGISTER_NODE \ $KUBELET_NETWORK_PLUGIN Restart=on-failure diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 new file mode 100644 index 000000000..5d8aef5c0 --- /dev/null +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-proxy + image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + command: + - /hyperkube + - proxy + - --v={{ kube_log_level | default('2') }} +{% if inventory_hostname in groups['kube-master'] %} + - --master=http://127.0.0.1:8080 +{% else %} + - --master=https://{{ groups['kube-master'][0] }}:{{kube_apiserver_port }} + - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml +{% endif %} + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: 
/etc/kubernetes/node-kubeconfig.yaml + name: "kubeconfig" + readOnly: true + - mountPath: /etc/kubernetes/ssl + name: "etc-kube-ssl" + readOnly: true + volumes: + - name: ssl-certs-host + hostPath: + path: /usr/share/ca-certificates + - name: "kubeconfig" + hostPath: + path: "/etc/kubernetes/node-kubeconfig.yaml" + - name: "etc-kube-ssl" + hostPath: + path: "/etc/kubernetes/ssl" diff --git a/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 new file mode 100644 index 000000000..d21b8eef3 --- /dev/null +++ b/roles/kubernetes/node/templates/node-kubeconfig.yaml.j2 @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +clusters: +- name: local + cluster: + certificate-authority: {{ kube_cert_dir }}/ca.pem +users: +- name: kubelet + user: + client-certificate: {{ kube_cert_dir }}/node.pem + client-key: {{ kube_cert_dir }}/node-key.pem +contexts: +- context: + cluster: local + user: kubelet + name: kubelet-{{ cluster_name }} +current-context: kubelet-{{ cluster_name }} diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2 new file mode 100644 index 000000000..05015651f --- /dev/null +++ b/roles/kubernetes/node/templates/openssl.conf.j2 @@ -0,0 +1,21 @@ +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = @alt_names +[alt_names] +DNS.1 = kubernetes +DNS.2 = kubernetes.default +DNS.3 = kubernetes.default.svc.{{ dns_domain }} +{% for host in groups['kube-master'] %} +IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} +{% endfor %} +{% set idx = groups['kube-master'] | length | int + 1 %} +IP.{{ idx | string }} = {{ kube_apiserver_ip }} +{% if kube_loadbalancer_ip is defined | default('') %} +{% set idx = idx | int + 1 %} +IP.{{ idx | 
string }} = {{ kube_loadbalancer_ip }} +{% endif %} diff --git a/roles/kubernetes/node/templates/proxy.j2 b/roles/kubernetes/node/templates/proxy.j2 deleted file mode 100644 index f529d7d5e..000000000 --- a/roles/kubernetes/node/templates/proxy.j2 +++ /dev/null @@ -1,6 +0,0 @@ -### -# kubernetes proxy config - -# default config should be adequate -[Service] -Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}" diff --git a/roles/kubernetes/node/templates/proxy.kubeconfig.j2 b/roles/kubernetes/node/templates/proxy.kubeconfig.j2 deleted file mode 100644 index 78d181631..000000000 --- a/roles/kubernetes/node/templates/proxy.kubeconfig.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -current-context: proxy-to-{{ cluster_name }} -preferences: {} -contexts: -- context: - cluster: {{ cluster_name }} - user: proxy - name: proxy-to-{{ cluster_name }} -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }} - name: {{ cluster_name }} -users: -- name: proxy - user: - token: {{ proxy_token }} diff --git a/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 deleted file mode 100644 index b1170c5d8..000000000 --- a/roles/kubernetes/node/templates/systemd-init/kube-proxy.service.j2 +++ /dev/null @@ -1,22 +0,0 @@ -[Unit] -Description=Kubernetes Kube-Proxy Server -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service calico-node.service -{% else %} -After=docker.service -{% endif %} - -[Service] -EnvironmentFile=/etc/kubernetes/config -EnvironmentFile=/etc/network-environment -ExecStart={{ bin_dir }}/kube-proxy \ - $KUBE_LOGTOSTDERR \ - $KUBE_LOG_LEVEL \ - $KUBE_MASTER \ - $KUBE_PROXY_ARGS -Restart=on-failure -LimitNOFILE=65536 - 
-[Install] -WantedBy=multi-user.target From d9a8de487fa1c1b2eee5c0e7333ab54062ff5f99 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:40:56 +0100 Subject: [PATCH 03/34] review roles order --- cluster.yml | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cluster.yml b/cluster.yml index 63ad7de5d..8e0792a2b 100644 --- a/cluster.yml +++ b/cluster.yml @@ -4,17 +4,21 @@ roles: - { role: download, tags: download } -- hosts: k8s-cluster - roles: - - { role: etcd, tags: etcd } - - { role: docker, tags: docker } - - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } - - { role: dnsmasq, tags: dnsmasq } - +# etcd must be running on master(s) before going on - hosts: kube-master roles: - - { role: kubernetes/master, tags: master } + - { role: etcd, tags: etcd } + +- hosts: k8s-cluster + roles: + - { role: docker, tags: docker } + - { role: dnsmasq, tags: dnsmasq } + - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } - hosts: kube-node roles: - { role: kubernetes/node, tags: node } + +- hosts: kube-master + roles: + - { role: kubernetes/master, tags: master } From 83c1105192082605d0394c5945402e2813805224 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:42:25 +0100 Subject: [PATCH 04/34] Configuring calico pool once, before starting calico-node --- roles/network_plugin/tasks/calico.yml | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/roles/network_plugin/tasks/calico.yml b/roles/network_plugin/tasks/calico.yml index c461f5607..818dab709 100644 --- a/roles/network_plugin/tasks/calico.yml +++ b/roles/network_plugin/tasks/calico.yml @@ -9,6 +9,12 @@ - name: Calico | Create calicoctl symlink (needed by kubelet) file: src=/usr/local/bin/calicoctl dest=/usr/bin/calicoctl state=link +- name: Calico | Configure calico-node desired pool + shell: calicoctl pool add {{ kube_pods_subnet }} + environment: + ETCD_AUTHORITY: "{{ 
groups['kube-master'][0] }}:2379" + run_once: true + - name: Calico | Write calico-node systemd init file template: src=calico/calico-node.service.j2 dest=/etc/systemd/system/calico-node.service register: newservice @@ -24,18 +30,6 @@ - name: Calico | Enable calico-node service: name=calico-node enabled=yes state=started -- name: Calico | Configure calico-node desired pool - shell: calicoctl pool add {{ kube_pods_subnet }} - environment: - ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001" - run_once: true - -- name: Calico | Configure calico-node remove default pool - shell: calicoctl pool remove 192.168.0.0/16 - environment: - ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:4001" - run_once: true - - name: Calico | Disable node mesh shell: calicoctl bgp node-mesh off when: peer_with_router|default(false) and inventory_hostname in groups['kube-node'] From b66cc67b6f0321e24618e0921ead4d29622188c3 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:44:41 +0100 Subject: [PATCH 05/34] Configure network-environment with a single template --- roles/network_plugin/tasks/flannel.yml | 4 -- roles/network_plugin/tasks/main.yml | 7 ++-- .../templates/flannel/network-environment.j2 | 1 - .../{calico => }/network-environment.j2 | 42 ++++++++++--------- 4 files changed, 26 insertions(+), 28 deletions(-) delete mode 100644 roles/network_plugin/templates/flannel/network-environment.j2 rename roles/network_plugin/templates/{calico => }/network-environment.j2 (66%) diff --git a/roles/network_plugin/tasks/flannel.yml b/roles/network_plugin/tasks/flannel.yml index fc06c55ce..a43be9e40 100644 --- a/roles/network_plugin/tasks/flannel.yml +++ b/roles/network_plugin/tasks/flannel.yml @@ -44,10 +44,6 @@ run_once: true delegate_to: "{{ groups['kube-master'][0] }}" -- name: Write network-environment - template: src=flannel/network-environment.j2 dest=/etc/network-environment mode=u+x - notify: restart flannel - - name: Launch Flannel service: name=flannel 
state=started enabled=yes notify: diff --git a/roles/network_plugin/tasks/main.yml b/roles/network_plugin/tasks/main.yml index 4b6c8c66a..e3ebf305f 100644 --- a/roles/network_plugin/tasks/main.yml +++ b/roles/network_plugin/tasks/main.yml @@ -4,13 +4,12 @@ when: ( kube_network_plugin is defined and kube_network_plugin == "calico" and kube_network_plugin == "flannel" ) or kube_network_plugin is not defined +- name: Write network-environment + template: src=network-environment.j2 dest=/etc/network-environment mode=u+x + - include: flannel.yml when: kube_network_plugin == "flannel" -- name: Calico | Write network-environment - template: src=calico/network-environment.j2 dest=/etc/network-environment mode=u+x - when: kube_network_plugin == "calico" - - include: calico.yml when: kube_network_plugin == "calico" diff --git a/roles/network_plugin/templates/flannel/network-environment.j2 b/roles/network_plugin/templates/flannel/network-environment.j2 deleted file mode 100644 index ac0b171d4..000000000 --- a/roles/network_plugin/templates/flannel/network-environment.j2 +++ /dev/null @@ -1 +0,0 @@ -FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network" diff --git a/roles/network_plugin/templates/calico/network-environment.j2 b/roles/network_plugin/templates/network-environment.j2 similarity index 66% rename from roles/network_plugin/templates/calico/network-environment.j2 rename to roles/network_plugin/templates/network-environment.j2 index 2407f1ecb..2b89eb7e8 100755 --- a/roles/network_plugin/templates/calico/network-environment.j2 +++ b/roles/network_plugin/templates/network-environment.j2 @@ -1,19 +1,23 @@ -#! /usr/bin/bash -# This node's IPv4 address -CALICO_IPAM=true -DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }} - -{% if inventory_hostname in groups['kube-node'] %} -# The kubernetes master IP -KUBERNETES_MASTER={{ groups['kube-master'][0] }} - -# Location of etcd cluster used by Calico. 
By default, this uses the etcd -# instance running on the Kubernetes Master -ETCD_AUTHORITY={{ groups['kube-master'][0] }}:4001 - -# The kubernetes-apiserver location - used by the calico plugin -KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}}/api/v1/ - -# Location of the calicoctl binary - used by the calico plugin -CALICOCTL_PATH="{{ bin_dir }}/calicoctl" -{% endif %} +#! /usr/bin/bash +{% if kube_network_plugin == "calico" %} +# This node's IPv4 address +CALICO_IPAM=true +DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }} + +{% if inventory_hostname in groups['kube-node'] %} +# The kubernetes master IP +KUBERNETES_MASTER={{ groups['kube-master'][0] }} + +# Location of etcd cluster used by Calico. By default, this uses the etcd +# instance running on the Kubernetes Master +ETCD_AUTHORITY={{ groups['kube-master'][0] }}:2379 + +# The kubernetes-apiserver location - used by the calico plugin +KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_apiserver_insecure_port}}/api/v1/ + +# Location of the calicoctl binary - used by the calico plugin +CALICOCTL_PATH="{{ bin_dir }}/calicoctl" +{% endif %} +{% else %} +FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network" +{% endif %} From e0ec3e7241d9e3aa2f8c9c7718326b4752fea015 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:46:02 +0100 Subject: [PATCH 06/34] Using one var file per environment is simplier --- environments/production/group_vars/all.yml | 62 +++++++++++++++++++ .../production/group_vars/k8s-cluster.yml | 60 ------------------ 2 files changed, 62 insertions(+), 60 deletions(-) delete mode 100644 environments/production/group_vars/k8s-cluster.yml diff --git a/environments/production/group_vars/all.yml b/environments/production/group_vars/all.yml index 04ca4c467..581fa1434 100644 --- a/environments/production/group_vars/all.yml +++ b/environments/production/group_vars/all.yml @@ -4,3 +4,65 @@ bin_dir: /usr/local/bin # Where the 
binaries will be downloaded. # Note: ensure that you've enough disk space (about 1G) local_release_dir: "/tmp/releases" + +# Users to create for basic auth in Kubernetes API via HTTP +# kube_users: +# kube: +# pass: changeme +# role: admin +# root: +# pass: changeme +# role: admin + +# Kubernetes cluster name, also will be used as DNS domain +# cluster_name: cluster.local + +# set this variable to calico if needed. keep it empty if flannel is used +# kube_network_plugin: calico + +# Kubernetes internal network for services, unused block of space. +# kube_service_addresses: 10.233.0.0/18 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! +# kube_pods_subnet: 10.233.64.0/18 + +# internal network total size (optional). This is the prefix of the +# entire network. Must be unused in your environment. +# kube_network_prefix: 18 + +# internal network node size allocation (optional). This is the size allocated +# to each node on your network. With these defaults you should have +# room for 4096 nodes with 254 pods per node. +# kube_network_node_prefix: 24 + +# With calico it is possible to distributed routes with border routers of the datacenter. +# peer_with_router: false +# Warning : enabling router peering will disable calico's default behavior ('node mesh'). +# The subnets of each nodes will be distributed by the datacenter router + +# The port the API Server will be listening on. +# kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +# kube_apiserver_port: 443 # (https) +# kube_apiserver_insecure_port: 8080 # (http) + +# Internal DNS configuration. +# Kubernetes can create and maintain its own DNS server to resolve service names +# into appropriate IP addresses. 
It's highly advisable to run such DNS server, +# as it greatly simplifies configuration of your applications - you can use +# service names instead of magic environment variables. +# You still must manually configure all your containers to use this DNS server, +# Kubernetes won't do this for you (yet). + +# Upstream dns servers used by dnsmasq +# upstream_dns_servers: +# - 8.8.8.8 +# - 4.4.8.8 +# +# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md +# dns_setup: true +# dns_domain: "{{ cluster_name }}" +# +# # Ip address of the kubernetes dns service +# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" diff --git a/environments/production/group_vars/k8s-cluster.yml b/environments/production/group_vars/k8s-cluster.yml deleted file mode 100644 index efaf3cca6..000000000 --- a/environments/production/group_vars/k8s-cluster.yml +++ /dev/null @@ -1,60 +0,0 @@ -# Users to create for basic auth in Kubernetes API via HTTP -# kube_users: -# kube: -# pass: changeme -# role: admin -# root: -# pass: changeme -# role: admin - -# Kubernetes cluster name, also will be used as DNS domain -# cluster_name: cluster.local - -# set this variable to calico if needed. keep it empty if flannel is used -# kube_network_plugin: calico - -# Kubernetes internal network for services, unused block of space. -# kube_service_addresses: 10.233.0.0/18 - -# internal network. When used, it will assign IP -# addresses from this range to individual pods. -# This network must be unused in your network infrastructure! -# kube_pods_subnet: 10.233.64.0/18 - -# internal network total size (optional). This is the prefix of the -# entire network. Must be unused in your environment. -# kube_network_prefix: 18 - -# internal network node size allocation (optional). This is the size allocated -# to each node on your network. With these defaults you should have -# room for 4096 nodes with 254 pods per node. 
-# kube_network_node_prefix: 24 - -# With calico it is possible to distributed routes with border routers of the datacenter. -# peer_with_router: false -# Warning : enabling router peering will disable calico's default behavior ('node mesh'). -# The subnets of each nodes will be distributed by the datacenter router - -# The port the API Server will be listening on. -# kube_master_port: 443 # (https) -# kube_master_insecure_port: 8080 # (http) - -# Internal DNS configuration. -# Kubernetes can create and mainatain its own DNS server to resolve service names -# into appropriate IP addresses. It's highly advisable to run such DNS server, -# as it greatly simplifies configuration of your applications - you can use -# service names instead of magic environment variables. -# You still must manually configure all your containers to use this DNS server, -# Kubernetes won't do this for you (yet). - -# Upstream dns servers used by dnsmasq -# upstream_dns_servers: -# - 8.8.8.8 -# - 4.4.8.8 -# -# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md -# dns_setup: true -# dns_domain: "{{ cluster_name }}" -# -# # Ip address of the kubernetes dns service -# dns_server: 10.233.0.10 From 3981b739245b0df51c393a9629cda1a488b69339 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:47:51 +0100 Subject: [PATCH 07/34] download only required kubernetes binaries --- roles/download/defaults/main.yml | 16 +++++---- roles/download/tasks/kubernetes.yml | 56 +++++++---------------------- 2 files changed, 22 insertions(+), 50 deletions(-) diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index 9a42c2a11..d21b43752 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -1,13 +1,15 @@ --- -etcd_download_url: https://github.com/coreos/etcd/releases/download -flannel_download_url: https://github.com/coreos/flannel/releases/download -kube_download_url: 
https://github.com/GoogleCloudPlatform/kubernetes/releases/download -calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download - etcd_version: v2.2.2 flannel_version: 0.5.5 kube_version: v1.1.2 -kube_sha1: 69d110d371752c6492d2f8695aa7a47be5b6ed4e +kubectl_checksum: "e0585c6e63f796d87b34cd1f16554892a49421b98a2862a896b2b7ebf1439ace" +kubelet_checksum: "6c029d34888e1ec4b9ab4b500b0712388984340488c5f3c19e2c759d1003cbff" + +calico_version: v0.12.0 + +etcd_download_url: "https://github.com/coreos/etcd/releases/download" +flannel_download_url: "https://github.com/coreos/flannel/releases/download" +kube_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kube_version }}/bin/linux/amd64" +calico_download_url: "https://github.com/Metaswitch/calico-docker/releases/download" -calico_version: v0.11.0 diff --git a/roles/download/tasks/kubernetes.yml b/roles/download/tasks/kubernetes.yml index de6359d14..0985a17d3 100644 --- a/roles/download/tasks/kubernetes.yml +++ b/roles/download/tasks/kubernetes.yml @@ -1,47 +1,17 @@ --- -- name: Create kubernetes release directory +- name: Create kubernetes binary directory local_action: file - path={{ local_release_dir }}/kubernetes + path="{{ local_release_dir }}/kubernetes/bin" state=directory + recurse=yes -- name: Check if kubernetes release archive has been downloaded - local_action: stat - path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz - register: k_tar - -# issues with get_url module and redirects, to be tested again in the near future -- name: Download kubernetes - local_action: shell - curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz - when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}" - register: dl_kube - -- name: Compare kubernetes archive checksum - local_action: stat - path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz - register: k_tar - failed_when: 
k_tar.stat.checksum != "{{ kube_sha1 }}" - when: dl_kube|changed - -- name: Extract kubernetes archive - local_action: unarchive - src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz - dest={{ local_release_dir }}/kubernetes copy=no - when: dl_kube|changed - -- name: Extract kubernetes binaries archive - local_action: unarchive - src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz - dest={{ local_release_dir }}/kubernetes copy=no - when: dl_kube|changed - -- name: Pick up only kubernetes binaries - local_action: synchronize - src={{ local_release_dir }}/kubernetes/kubernetes/server/bin - dest={{ local_release_dir }}/kubernetes - when: dl_kube|changed - -- name: Delete unused kubernetes files - local_action: file - path={{ local_release_dir }}/kubernetes/kubernetes state=absent - when: dl_kube|changed +- name: Download kubelet and kubectl + local_action: get_url + url="{{ kube_download_url }}/{{ item.name }}" + dest="{{ local_release_dir }}/kubernetes/bin" + sha256sum="{{ item.checksum }}" + with_items: + - name: kubelet + checksum: "{{ kubelet_checksum }}" + - name: kubectl + checksum: "{{ kubectl_checksum }}" From b23b8aa3de650d9852412d383e0520bf04f2f649 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:48:43 +0100 Subject: [PATCH 08/34] dnsmasq with multi master arch --- roles/dnsmasq/tasks/main.yml | 11 +++++++---- roles/dnsmasq/templates/resolv.conf.j2 | 4 ++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index ab534dfb2..7ba02c36c 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -21,16 +21,17 @@ apt: name: "{{ item }}" state: present + update_cache: yes with_items: - dnsmasq - bind9utils - when: inventory_hostname in groups['kube-master'][0] + when: inventory_hostname in groups['kube-master'] - name: ensure dnsmasq.d directory exists file: path: /etc/dnsmasq.d state: directory - when: 
inventory_hostname in groups['kube-master'][0] + when: inventory_hostname in groups['kube-master'] - name: configure dnsmasq template: @@ -39,14 +40,14 @@ mode: 755 notify: - restart dnsmasq - when: inventory_hostname in groups['kube-master'][0] + when: inventory_hostname in groups['kube-master'] - name: enable dnsmasq service: name: dnsmasq state: started enabled: yes - when: inventory_hostname in groups['kube-master'][0] + when: inventory_hostname in groups['kube-master'] - name: update resolv.conf with new DNS setup template: @@ -56,3 +57,5 @@ - name: disable resolv.conf modification by dhclient copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x + +- meta: flush_handlers diff --git a/roles/dnsmasq/templates/resolv.conf.j2 b/roles/dnsmasq/templates/resolv.conf.j2 index d10a6fc92..f0b475b02 100644 --- a/roles/dnsmasq/templates/resolv.conf.j2 +++ b/roles/dnsmasq/templates/resolv.conf.j2 @@ -1,5 +1,9 @@ ; generated by ansible search {{ [ 'default.svc.' + dns_domain, 'svc.' 
+ dns_domain, dns_domain ] | join(' ') }} +{% if inventory_hostname in groups['kube-master'] %} +nameserver {{ ansible_default_ipv4.address }} +{% else %} {% for host in groups['kube-master'] %} nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }} {% endfor %} +{% endif %} From 47c211f9c1ffab43704b12a349e5903482c3f5eb Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:49:39 +0100 Subject: [PATCH 09/34] upgrading docker version --- roles/docker/tasks/install.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/docker/tasks/install.yml b/roles/docker/tasks/install.yml index 4880629a8..473e132fb 100644 --- a/roles/docker/tasks/install.yml +++ b/roles/docker/tasks/install.yml @@ -13,7 +13,7 @@ with_items: - aufs-tools - cgroupfs-mount - - docker-engine=1.8.3-0~{{ ansible_distribution_release }} + - docker-engine=1.9.1-0~{{ ansible_distribution_release }} - name: Copy default docker configuration template: src=default-docker.j2 dest=/etc/default/docker backup=yes From ef8a46b8c5c8ccf6b3aac81522defdeb754a50d4 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:51:34 +0100 Subject: [PATCH 10/34] Doesn't manage firewall, note: has to be disabled before running the playbook --- roles/etcd/tasks/configure.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index aca67018f..18a2cc882 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -1,7 +1,4 @@ --- -- name: Disable ferm - service: name=ferm state=stopped enabled=no - - name: Create etcd2 environment vars dir file: path=/etc/systemd/system/etcd2.service.d state=directory From f49620517e7c9e5573b9deda6d0509117911e1bb Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:52:20 +0100 Subject: [PATCH 11/34] running kubernetes master processes as pods --- roles/kubernetes/master/handlers/main.yml | 39 +------- 
roles/kubernetes/master/meta/main.yml | 3 +- roles/kubernetes/master/tasks/config.yml | 94 ------------------- roles/kubernetes/master/tasks/install.yml | 34 ------- roles/kubernetes/master/tasks/main.yml | 82 +++++++++++++++- .../kubernetes/master/templates/apiserver.j2 | 28 ------ .../master/templates/controller-manager.j2 | 6 -- .../controller-manager.kubeconfig.j2 | 18 ---- ...beconfig.j2 => kubectl-kubeconfig.yaml.j2} | 4 +- .../manifests/kube-apiserver.manifest.j2 | 46 +++++++++ .../kube-controller-manager.manifest.j2 | 38 ++++++++ .../manifests/kube-podmaster.manifest.j2 | 44 +++++++++ .../manifests/kube-scheduler.manifest.j2 | 22 +++++ roles/kubernetes/master/templates/proxy.j2 | 8 -- .../master/templates/proxy.kubeconfig.j2 | 18 ---- .../kubernetes/master/templates/scheduler.j2 | 7 -- .../master/templates/scheduler.kubeconfig.j2 | 18 ---- .../systemd-init/kube-apiserver.service.j2 | 29 ------ .../kube-controller-manager.service.j2 | 20 ---- .../systemd-init/kube-proxy.service.j2 | 22 ----- .../systemd-init/kube-scheduler.service.j2 | 20 ---- 21 files changed, 238 insertions(+), 362 deletions(-) delete mode 100644 roles/kubernetes/master/tasks/config.yml delete mode 100644 roles/kubernetes/master/tasks/install.yml delete mode 100644 roles/kubernetes/master/templates/apiserver.j2 delete mode 100644 roles/kubernetes/master/templates/controller-manager.j2 delete mode 100644 roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 rename roles/kubernetes/master/templates/{kubectl.kubeconfig.j2 => kubectl-kubeconfig.yaml.j2} (68%) create mode 100644 roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 create mode 100644 roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 create mode 100644 roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 create mode 100644 roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 delete mode 100644 
roles/kubernetes/master/templates/proxy.j2 delete mode 100644 roles/kubernetes/master/templates/proxy.kubeconfig.j2 delete mode 100644 roles/kubernetes/master/templates/scheduler.j2 delete mode 100644 roles/kubernetes/master/templates/scheduler.kubeconfig.j2 delete mode 100644 roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 delete mode 100644 roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 delete mode 100644 roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 delete mode 100644 roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 diff --git a/roles/kubernetes/master/handlers/main.yml b/roles/kubernetes/master/handlers/main.yml index 4e7644b32..8b00d1689 100644 --- a/roles/kubernetes/master/handlers/main.yml +++ b/roles/kubernetes/master/handlers/main.yml @@ -1,47 +1,16 @@ --- -- name: restart daemons - command: /bin/true - notify: - - reload systemd - - restart reloaded-scheduler - - restart reloaded-controller-manager - - restart reloaded-apiserver - - restart reloaded-proxy - - name: reload systemd command: systemctl daemon-reload -- name: restart apiserver +- name: restart kubelet command: /bin/true notify: - reload systemd - - restart reloaded-apiserver + - restart reloaded-kubelet -- name: restart reloaded-apiserver +- name: restart reloaded-kubelet service: - name: kube-apiserver - state: restarted - -- name: restart controller-manager - command: /bin/true - notify: - - reload systemd - - restart reloaded-controller-manager - -- name: restart reloaded-controller-manager - service: - name: kube-controller-manager - state: restarted - -- name: restart scheduler - command: /bin/true - notify: - - reload systemd - - restart reloaded-scheduler - -- name: restart reloaded-scheduler - service: - name: kube-scheduler + name: kubelet state: restarted - name: restart proxy diff --git a/roles/kubernetes/master/meta/main.yml b/roles/kubernetes/master/meta/main.yml index 
31675692c..53dd04017 100644 --- a/roles/kubernetes/master/meta/main.yml +++ b/roles/kubernetes/master/meta/main.yml @@ -1,3 +1,4 @@ --- dependencies: - - { role: kubernetes/common } + - { role: etcd } + - { role: kubernetes/node } diff --git a/roles/kubernetes/master/tasks/config.yml b/roles/kubernetes/master/tasks/config.yml deleted file mode 100644 index 2f488a921..000000000 --- a/roles/kubernetes/master/tasks/config.yml +++ /dev/null @@ -1,94 +0,0 @@ ---- -- name: get the node token values from token files - slurp: - src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token" - with_items: - - "system:controller_manager" - - "system:scheduler" - - "system:kubectl" - - "system:proxy" - register: tokens - delegate_to: "{{ groups['kube-master'][0] }}" - -- name: Set token facts - set_fact: - controller_manager_token: "{{ tokens.results[0].content|b64decode }}" - scheduler_token: "{{ tokens.results[1].content|b64decode }}" - kubectl_token: "{{ tokens.results[2].content|b64decode }}" - proxy_token: "{{ tokens.results[3].content|b64decode }}" - -- name: write the config files for api server - template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver backup=yes - notify: - - restart apiserver - -- name: write config file for controller-manager - template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager backup=yes - notify: - - restart controller-manager - -- name: write the kubecfg (auth) file for controller-manager - template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig backup=yes - notify: - - restart controller-manager - -- name: write the config file for scheduler - template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler backup=yes - notify: - - restart scheduler - -- name: write the kubecfg (auth) file for scheduler - template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig backup=yes - notify: - - restart scheduler - -- name: write the 
kubecfg (auth) file for kubectl - template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig backup=yes - -- name: Copy kubectl bash completion - copy: src=kubectl_bash_completion.sh dest=/etc/bash_completion.d/kubectl.sh - -- name: Create proxy environment vars dir - file: path=/etc/systemd/system/kube-proxy.service.d state=directory - -- name: Write proxy config file - template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf backup=yes - notify: - - restart proxy - -- name: write the kubecfg (auth) file for proxy - template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig backup=yes - -- name: populate users for basic auth in API - lineinfile: - dest: "{{ kube_users_dir }}/known_users.csv" - create: yes - line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}' - backup: yes - with_dict: "{{ kube_users }}" - notify: - - restart apiserver - -- name: Enable controller-manager - service: - name: kube-controller-manager - enabled: yes - state: started - -- name: Enable scheduler - service: - name: kube-scheduler - enabled: yes - state: started - -- name: Enable kube-proxy - service: - name: kube-proxy - enabled: yes - state: started - -- name: Enable apiserver - service: - name: kube-apiserver - enabled: yes - state: started diff --git a/roles/kubernetes/master/tasks/install.yml b/roles/kubernetes/master/tasks/install.yml deleted file mode 100644 index 92d194515..000000000 --- a/roles/kubernetes/master/tasks/install.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- name: Write kube-apiserver systemd init file - template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service backup=yes - notify: restart apiserver - -- name: Write kube-controller-manager systemd init file - template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service backup=yes - notify: restart controller-manager - -- name: Write 
kube-scheduler systemd init file - template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service backup=yes - notify: restart scheduler - -- name: Write kube-proxy systemd init file - template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service backup=yes - notify: restart proxy - -- name: Install kubernetes binaries - copy: - src={{ local_release_dir }}/kubernetes/bin/{{ item }} - dest={{ bin_dir }} - owner=kube - mode=u+x - with_items: - - kube-apiserver - - kube-controller-manager - - kube-scheduler - - kube-proxy - - kubectl - notify: - - restart daemons - -- name: Allow apiserver to bind on both secure and insecure ports - shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index 8570db68c..12459956a 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -1,3 +1,81 @@ --- -- include: install.yml -- include: config.yml +- name: Install kubectl binary + copy: + src={{ local_release_dir }}/kubernetes/bin/kubectl + dest={{ bin_dir }} + owner=kube + mode=u+x + notify: + - restart daemons + +- name: Copy kubectl bash completion + copy: + src: kubectl_bash_completion.sh + dest: /etc/bash_completion.d/kubectl.sh + +- name: populate users for basic auth in API + lineinfile: + dest: "{{ kube_users_dir }}/known_users.csv" + create: yes + line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}' + backup: yes + with_dict: "{{ kube_users }}" + +# Sync masters +- name: synchronize auth directories for masters + synchronize: + src: "{{ item }}" + dest: "{{ kube_config_dir }}" + recursive: yes + delete: yes + rsync_opts: [ '--one-file-system'] + with_items: + - "{{ kube_token_dir }}" + - "{{ kube_cert_dir }}" + - "{{ kube_users_dir }}" + delegate_to: "{{ groups['kube-master'][0] }}" + +# Write manifests +- name: Write kube-apiserver manifest + 
template: + src: manifests/kube-apiserver.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-apisever.manifest" + notify: + - restart kubelet + +- meta: flush_handlers + +- name: wait for the apiserver to be running (pulling image and running container) + wait_for: + port: 8080 + +- name: install required python module 'httplib2' + apt: + name: "python-httplib2" + state: present + when: inventory_hostname == groups['kube-master'][0] + +- name: Create 'kube-system' namespace + uri: + url: http://{{ groups['kube-master'][0]}}:{{ kube_apiserver_insecure_port }}/api/v1/namespaces + method: POST + body: '{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"kube-system"}}' + status_code: 201,409 + body_format: json + run_once: yes + when: inventory_hostname == groups['kube-master'][0] + +- name: Write kube-controller-manager manifest + template: + src: manifests/kube-controller-manager.manifest.j2 + dest: "{{ kube_config_dir }}/kube-controller-manager.manifest" + +- name: Write kube-scheduler manifest + template: + src: manifests/kube-scheduler.manifest.j2 + dest: "{{ kube_config_dir }}/kube-scheduler.manifest" + +- name: Write podmaster manifest + template: + src: manifests/kube-podmaster.manifest.j2 + dest: "{{ kube_manifest_dir }}/kube-podmaster.manifest" diff --git a/roles/kubernetes/master/templates/apiserver.j2 b/roles/kubernetes/master/templates/apiserver.j2 deleted file mode 100644 index 0a38d5c87..000000000 --- a/roles/kubernetes/master/templates/apiserver.j2 +++ /dev/null @@ -1,28 +0,0 @@ -### -# kubernetes system config -# -# The following values are used to configure the kube-apiserver -# - -# The address on the local server to listen to. -KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0" - -# The port on the local server to listen on. 
-KUBE_API_PORT="--insecure-port={{kube_master_insecure_port}} --secure-port={{ kube_master_port }}" - -# KUBELET_PORT="--kubelet_port=10250" - -# Address range to use for services -KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}" - -# Location of the etcd cluster -KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}" - -# default admission control policies -KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" - -# RUNTIME API CONFIGURATION (e.g. enable extensions) -KUBE_RUNTIME_CONFIG="{% if kube_api_runtime_config is defined %}{% for conf in kube_api_runtime_config %}--runtime-config={{ conf }} {% endfor %}{% endif %}" - -# Add you own! -KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt" diff --git a/roles/kubernetes/master/templates/controller-manager.j2 b/roles/kubernetes/master/templates/controller-manager.j2 deleted file mode 100644 index c7a932900..000000000 --- a/roles/kubernetes/master/templates/controller-manager.j2 +++ /dev/null @@ -1,6 +0,0 @@ -### -# The following values are used to configure the kubernetes controller-manager - -# defaults from config and apiserver should be adequate - -KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt" diff --git a/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 b/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 deleted file mode 100644 index 
c71ac50f3..000000000 --- a/roles/kubernetes/master/templates/controller-manager.kubeconfig.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -current-context: controller-manager-to-{{ cluster_name }} -preferences: {} -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }} - name: {{ cluster_name }} -contexts: -- context: - cluster: {{ cluster_name }} - user: controller-manager - name: controller-manager-to-{{ cluster_name }} -users: -- name: controller-manager - user: - token: {{ controller_manager_token }} diff --git a/roles/kubernetes/master/templates/kubectl.kubeconfig.j2 b/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2 similarity index 68% rename from roles/kubernetes/master/templates/kubectl.kubeconfig.j2 rename to roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2 index dd8f0eabe..5cc74cf9e 100644 --- a/roles/kubernetes/master/templates/kubectl.kubeconfig.j2 +++ b/roles/kubernetes/master/templates/kubectl-kubeconfig.yaml.j2 @@ -4,8 +4,8 @@ current-context: kubectl-to-{{ cluster_name }} preferences: {} clusters: - cluster: - certificate-authority-data: {{ kube_ca_cert|b64encode }} - server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }} + certificate-authority-data: {{ kube_node_cert|b64encode }} + server: https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }} name: {{ cluster_name }} contexts: - context: diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 new file mode 100644 index 000000000..320594fa4 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -0,0 +1,46 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver +spec: + hostNetwork: true + containers: + - name: kube-apiserver + image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + command: 
+ - /hyperkube + - apiserver + - --insecure-bind-address=0.0.0.0 + - --etcd-servers=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379 + - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + - --service-cluster-ip-range={{ kube_service_addresses }} + - --client-ca-file={{ kube_cert_dir }}/ca.pem + - --basic-auth-file={{ kube_users_dir }}/known_users.csv + - --tls-cert-file={{ kube_cert_dir }}/apiserver.pem + - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --secure-port={{ kube_apiserver_port }} + - --token-auth-file={{ kube_token_dir }}/known_tokens.csv + - --v={{ kube_log_level | default('2') }} + - --allow-privileged=true + ports: + - containerPort: {{ kube_apiserver_port }} + hostPort: {{ kube_apiserver_port }} + name: https + - containerPort: {{ kube_apiserver_insecure_port }} + hostPort: {{ kube_apiserver_insecure_port }} + name: local + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: kubernetes-config + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + volumes: + - hostPath: + path: {{ kube_config_dir }} + name: kubernetes-config + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 new file mode 100644 index 000000000..17052f9f4 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-controller-manager + image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + command: + - /hyperkube + - controller-manager + - 
--master=http://127.0.0.1:8080 + - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem + - --root-ca-file={{ kube_cert_dir }}/ca.pem + - --v={{ kube_log_level | default('2') }} + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - mountPath: {{ kube_cert_dir }} + name: ssl-certs-kubernetes + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + volumes: + - hostPath: + path: {{ kube_cert_dir }} + name: ssl-certs-kubernetes + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host diff --git a/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 new file mode 100644 index 000000000..a75fa3b32 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-podmaster + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: scheduler-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers=http://127.0.0.1:2379 + - --key=scheduler + - --source-file={{ kube_config_dir}}/kube-scheduler.manifest + - --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: manifest-src + readOnly: true + - mountPath: {{ kube_manifest_dir }} + name: manifest-dst + - name: controller-manager-elector + image: gcr.io/google_containers/podmaster:1.1 + command: + - /podmaster + - --etcd-servers=http://127.0.0.1:2379 + - --key=controller + - --source-file={{ kube_config_dir }}/kube-controller-manager.manifest + - --dest-file={{ kube_manifest_dir }}/kube-controller-manager.manifest + terminationMessagePath: /dev/termination-log + volumeMounts: + - mountPath: {{ kube_config_dir }} + name: manifest-src + readOnly: true + - mountPath: {{ 
kube_manifest_dir }} + name: manifest-dst + volumes: + - hostPath: + path: {{ kube_config_dir }} + name: manifest-src + - hostPath: + path: {{ kube_manifest_dir }} + name: manifest-dst diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 new file mode 100644 index 000000000..7a595f2c6 --- /dev/null +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-scheduler + image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + command: + - /hyperkube + - scheduler + - --master=http://127.0.0.1:8080 + - --v={{ kube_log_level | default('2') }} + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + initialDelaySeconds: 15 + timeoutSeconds: 1 diff --git a/roles/kubernetes/master/templates/proxy.j2 b/roles/kubernetes/master/templates/proxy.j2 deleted file mode 100644 index 33f811a53..000000000 --- a/roles/kubernetes/master/templates/proxy.j2 +++ /dev/null @@ -1,8 +0,0 @@ -### -# kubernetes proxy config - -# default config should be adequate - -# Add your own! 
-[Service] -Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig --proxy-mode={{kube_proxy_mode}}" diff --git a/roles/kubernetes/master/templates/proxy.kubeconfig.j2 b/roles/kubernetes/master/templates/proxy.kubeconfig.j2 deleted file mode 100644 index 5e35eb5d2..000000000 --- a/roles/kubernetes/master/templates/proxy.kubeconfig.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -current-context: proxy-to-{{ cluster_name }} -preferences: {} -contexts: -- context: - cluster: {{ cluster_name }} - user: proxy - name: proxy-to-{{ cluster_name }} -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: http://{{ groups['kube-master'][0] }}:{{kube_master_insecure_port}} - name: {{ cluster_name }} -users: -- name: proxy - user: - token: {{ proxy_token }} diff --git a/roles/kubernetes/master/templates/scheduler.j2 b/roles/kubernetes/master/templates/scheduler.j2 deleted file mode 100644 index 8af898d0b..000000000 --- a/roles/kubernetes/master/templates/scheduler.j2 +++ /dev/null @@ -1,7 +0,0 @@ -### -# kubernetes scheduler config - -# default config should be adequate - -# Add your own! 
-KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig" diff --git a/roles/kubernetes/master/templates/scheduler.kubeconfig.j2 b/roles/kubernetes/master/templates/scheduler.kubeconfig.j2 deleted file mode 100644 index bc6203745..000000000 --- a/roles/kubernetes/master/templates/scheduler.kubeconfig.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Config -current-context: scheduler-to-{{ cluster_name }} -preferences: {} -clusters: -- cluster: - certificate-authority: {{ kube_cert_dir }}/ca.crt - server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }} - name: {{ cluster_name }} -contexts: -- context: - cluster: {{ cluster_name }} - user: scheduler - name: scheduler-to-{{ cluster_name }} -users: -- name: scheduler - user: - token: {{ scheduler_token }} diff --git a/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 deleted file mode 100644 index c2dd67484..000000000 --- a/roles/kubernetes/master/templates/systemd-init/kube-apiserver.service.j2 +++ /dev/null @@ -1,29 +0,0 @@ -[Unit] -Description=Kubernetes API Server -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -Requires=etcd2.service -After=etcd2.service - -[Service] -EnvironmentFile=/etc/network-environment -EnvironmentFile=-/etc/kubernetes/config -EnvironmentFile=-/etc/kubernetes/apiserver -User=kube -ExecStart={{ bin_dir }}/kube-apiserver \ - $KUBE_LOGTOSTDERR \ - $KUBE_LOG_LEVEL \ - $KUBE_ETCD_SERVERS \ - $KUBE_API_ADDRESS \ - $KUBE_API_PORT \ - $KUBELET_PORT \ - $KUBE_ALLOW_PRIV \ - $KUBE_SERVICE_ADDRESSES \ - $KUBE_ADMISSION_CONTROL \ - $KUBE_RUNTIME_CONFIG \ - $KUBE_API_ARGS -Restart=on-failure -Type=notify -LimitNOFILE=65536 - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 deleted 
file mode 100644 index a308630eb..000000000 --- a/roles/kubernetes/master/templates/systemd-init/kube-controller-manager.service.j2 +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Kubernetes Controller Manager -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -Requires=etcd2.service -After=etcd2.service - -[Service] -EnvironmentFile=-/etc/kubernetes/config -EnvironmentFile=-/etc/kubernetes/controller-manager -User=kube -ExecStart={{ bin_dir }}/kube-controller-manager \ - $KUBE_LOGTOSTDERR \ - $KUBE_LOG_LEVEL \ - $KUBE_MASTER \ - $KUBE_CONTROLLER_MANAGER_ARGS -Restart=on-failure -LimitNOFILE=65536 - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 deleted file mode 100644 index b1170c5d8..000000000 --- a/roles/kubernetes/master/templates/systemd-init/kube-proxy.service.j2 +++ /dev/null @@ -1,22 +0,0 @@ -[Unit] -Description=Kubernetes Kube-Proxy Server -Documentation=https://github.com/GoogleCloudPlatform/kubernetes -{% if kube_network_plugin is defined and kube_network_plugin == "calico" %} -After=docker.service calico-node.service -{% else %} -After=docker.service -{% endif %} - -[Service] -EnvironmentFile=/etc/kubernetes/config -EnvironmentFile=/etc/network-environment -ExecStart={{ bin_dir }}/kube-proxy \ - $KUBE_LOGTOSTDERR \ - $KUBE_LOG_LEVEL \ - $KUBE_MASTER \ - $KUBE_PROXY_ARGS -Restart=on-failure -LimitNOFILE=65536 - -[Install] -WantedBy=multi-user.target diff --git a/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 b/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 deleted file mode 100644 index c5d93111f..000000000 --- a/roles/kubernetes/master/templates/systemd-init/kube-scheduler.service.j2 +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Kubernetes Scheduler Plugin -Documentation=https://github.com/GoogleCloudPlatform/kubernetes 
-Requires=etcd2.service -After=etcd2.service - -[Service] -EnvironmentFile=-/etc/kubernetes/config -EnvironmentFile=-/etc/kubernetes/scheduler -User=kube -ExecStart={{ bin_dir }}/kube-scheduler \ - $KUBE_LOGTOSTDERR \ - $KUBE_LOG_LEVEL \ - $KUBE_MASTER \ - $KUBE_SCHEDULER_ARGS -Restart=on-failure -LimitNOFILE=65536 - -[Install] -WantedBy=multi-user.target From fd0e5e756ed2b1f12ccbaf3825eb436b597c0210 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Fri, 11 Dec 2015 11:53:42 +0100 Subject: [PATCH 12/34] Update README, new versions --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 5a98ccda7..e3fae11cb 100644 --- a/README.md +++ b/README.md @@ -14,9 +14,9 @@ Ansible v1.9.x ### Components * [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2 * [etcd](https://github.com/coreos/etcd/releases) v2.2.2 -* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.11.0 +* [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.12.0 * [flanneld](https://github.com/coreos/flannel/releases) v0.5.5 -* [docker](https://www.docker.com/) v1.8.3 +* [docker](https://www.docker.com/) v1.9.1 Ansible From eb4dd5f19d63904bb30671d493cae759e4ae4c40 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 18:22:22 +0100 Subject: [PATCH 13/34] update kubectl bash completion --- .../master/files/kubectl_bash_completion.sh | 824 +++++++++++++++++- 1 file changed, 818 insertions(+), 6 deletions(-) diff --git a/roles/kubernetes/master/files/kubectl_bash_completion.sh b/roles/kubernetes/master/files/kubectl_bash_completion.sh index 899770530..f6d0f25b5 100644 --- a/roles/kubernetes/master/files/kubectl_bash_completion.sh +++ b/roles/kubernetes/master/files/kubectl_bash_completion.sh @@ -41,7 +41,9 @@ __handle_reply() __debug "${FUNCNAME}" case $cur in -*) - compopt -o nospace + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi local allflags if [ 
${#must_have_one_flag[@]} -ne 0 ]; then allflags=("${must_have_one_flag[@]}") @@ -49,7 +51,9 @@ __handle_reply() allflags=("${flags[*]} ${two_word_flags[*]}") fi COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - [[ $COMPREPLY == *= ]] || compopt +o nospace + if [[ $(type -t compopt) = "builtin" ]]; then + [[ $COMPREPLY == *= ]] || compopt +o nospace + fi return 0; ;; esac @@ -156,11 +160,11 @@ __handle_word() { if [[ $c -ge $cword ]]; then __handle_reply - return + return fi __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" if [[ "${words[c]}" == -* ]]; then - __handle_flag + __handle_flag elif __contains_word "${words[c]}" "${commands[@]}"; then __handle_command else @@ -283,6 +287,30 @@ _kubectl_get() flags+=("--watch") flags+=("-w") flags+=("--watch-only") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -327,6 +355,30 @@ _kubectl_describe() flags_completion+=("__handle_filename_extension_flag json|yaml|yml") flags+=("--selector=") two_word_flags+=("-l") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + 
flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -370,6 +422,30 @@ _kubectl_create() flags+=("--save-config") flags+=("--schema-cache-dir=") flags+=("--validate") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--filename=") @@ -402,6 +478,30 @@ _kubectl_replace() flags+=("--schema-cache-dir=") flags+=("--timeout=") flags+=("--validate") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + 
flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--filename=") @@ -429,6 +529,30 @@ _kubectl_patch() two_word_flags+=("-o") flags+=("--patch=") two_word_flags+=("-p") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--patch=") @@ -461,6 +585,30 @@ _kubectl_delete() flags+=("--selector=") two_word_flags+=("-l") flags+=("--timeout=") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -508,6 +656,30 @@ _kubectl_edit() flags+=("--output-version=") flags+=("--save-config") flags+=("--windows-line-endings") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + 
flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -533,6 +705,30 @@ _kubectl_apply() two_word_flags+=("-o") flags+=("--schema-cache-dir=") flags+=("--validate") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--filename=") @@ -550,6 +746,30 @@ _kubectl_namespace() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + 
flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -577,6 +797,30 @@ _kubectl_logs() flags+=("--since-time=") flags+=("--tail=") flags+=("--timestamps") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -592,6 +836,7 @@ _kubectl_rolling-update() flags_with_completion=() flags_completion=() + flags+=("--container=") flags+=("--deployment-label-key=") flags+=("--dry-run") flags+=("--filename=") @@ -616,6 +861,30 @@ _kubectl_rolling-update() flags+=("--timeout=") flags+=("--update-period=") flags+=("--validate") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + 
flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--filename=") @@ -646,6 +915,30 @@ _kubectl_scale() flags+=("--replicas=") flags+=("--resource-version=") flags+=("--timeout=") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--replicas=") @@ -668,6 +961,30 @@ _kubectl_attach() flags+=("-i") flags+=("--tty") flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -691,6 +1008,30 @@ _kubectl_exec() flags+=("-i") flags+=("--tty") flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + 
flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -708,6 +1049,30 @@ _kubectl_port-forward() flags+=("--pod=") two_word_flags+=("-p") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -738,6 +1103,30 @@ _kubectl_proxy() two_word_flags+=("-w") flags+=("--www-prefix=") two_word_flags+=("-P") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + 
flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -786,6 +1175,30 @@ _kubectl_run() flags+=("--template=") two_word_flags+=("-t") flags+=("--tty") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--image=") @@ -834,6 +1247,30 @@ _kubectl_expose() flags+=("--template=") two_word_flags+=("-t") flags+=("--type=") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -871,6 +1308,30 @@ _kubectl_autoscale() flags+=("--sort-by=") flags+=("--template=") two_word_flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") 
+ flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--max=") @@ -908,6 +1369,30 @@ _kubectl_label() flags+=("--sort-by=") flags+=("--template=") two_word_flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -964,6 +1449,30 @@ _kubectl_annotate() flags+=("--sort-by=") flags+=("--template=") two_word_flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + 
flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -992,6 +1501,30 @@ _kubectl_config_view() flags+=("--sort-by=") flags+=("--template=") two_word_flags+=("-t") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1012,6 +1545,25 @@ _kubectl_config_set-cluster() flags+=("--embed-certs") flags+=("--insecure-skip-tls-verify") flags+=("--server=") + flags+=("--alsologtostderr") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1033,6 +1585,25 @@ _kubectl_config_set-credentials() flags+=("--password=") flags+=("--token=") flags+=("--username=") + flags+=("--alsologtostderr") 
+ flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--user=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1051,6 +1622,27 @@ _kubectl_config_set-context() flags+=("--cluster=") flags+=("--namespace=") flags+=("--user=") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1066,6 +1658,30 @@ _kubectl_config_set() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + 
flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1081,6 +1697,30 @@ _kubectl_config_unset() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1096,6 +1736,30 @@ _kubectl_config_use-context() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1119,6 +1783,29 @@ _kubectl_config() flags_completion=() flags+=("--kubeconfig=") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + 
flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1134,6 +1821,30 @@ _kubectl_cluster-info() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1149,6 +1860,30 @@ _kubectl_api-versions() flags_with_completion=() flags_completion=() + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + 
flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1166,6 +1901,30 @@ _kubectl_version() flags+=("--client") flags+=("-c") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1182,6 +1941,30 @@ _kubectl_explain() flags_completion=() flags+=("--recursive") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_noun=() @@ -1215,6 +1998,30 @@ _kubectl_convert() flags+=("--template=") two_word_flags+=("-t") flags+=("--validate") + flags+=("--alsologtostderr") + flags+=("--api-version=") + flags+=("--certificate-authority=") + flags+=("--client-certificate=") + 
flags+=("--client-key=") + flags+=("--cluster=") + flags+=("--context=") + flags+=("--insecure-skip-tls-verify") + flags+=("--kubeconfig=") + flags+=("--log-backtrace-at=") + flags+=("--log-dir=") + flags+=("--log-flush-frequency=") + flags+=("--logtostderr") + flags+=("--match-server-version") + flags+=("--namespace=") + flags+=("--password=") + flags+=("--server=") + two_word_flags+=("-s") + flags+=("--stderrthreshold=") + flags+=("--token=") + flags+=("--user=") + flags+=("--username=") + flags+=("--v=") + flags+=("--vmodule=") must_have_one_flag=() must_have_one_flag+=("--filename=") @@ -1291,7 +2098,7 @@ _kubectl() __start_kubectl() { local cur prev words cword - if declare -F _init_completions >/dev/null 2>&1; then + if declare -F _init_completion >/dev/null 2>&1; then _init_completion -s || return else __my_init_completion || return @@ -1311,5 +2118,10 @@ __start_kubectl() __handle_word } -complete -F __start_kubectl kubectl +if [[ $(type -t compopt) = "builtin" ]]; then + complete -F __start_kubectl kubectl +else + complete -o nospace -F __start_kubectl kubectl +fi + # ex: ts=4 sw=4 et filetype=sh From 1568cbe8e9a65ccdb9075ad0b2e6f2ce0729bba4 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 18:24:21 +0100 Subject: [PATCH 14/34] optionnal api runtime extensions --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 320594fa4..8ada4bc3e 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -20,6 +20,11 @@ spec: - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem - --secure-port={{ kube_apiserver_port }} +{% if kube_api_runtime_config is defined 
%} +{% for conf in kube_api_runtime_config %} + - --runtime-config={{ conf }} +{% endfor %} +{% endif %} - --token-auth-file={{ kube_token_dir }}/known_tokens.csv - --v={{ kube_log_level | default('2') }} - --allow-privileged=true From 3cbcd6f18983765f7a2c6aff5bae3fedd6a688f6 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 19:06:57 +0100 Subject: [PATCH 15/34] Calico uses the loadbalancer to reach etcd if 'loadbalancer_address' is defined. The loadbalancer has to be configured first --- roles/network_plugin/tasks/calico.yml | 2 +- roles/network_plugin/templates/network-environment.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/network_plugin/tasks/calico.yml b/roles/network_plugin/tasks/calico.yml index 818dab709..c507d66e1 100644 --- a/roles/network_plugin/tasks/calico.yml +++ b/roles/network_plugin/tasks/calico.yml @@ -12,7 +12,7 @@ - name: Calico | Configure calico-node desired pool shell: calicoctl pool add {{ kube_pods_subnet }} environment: - ETCD_AUTHORITY: "{{ groups['kube-master'][0] }}:2379" + ETCD_AUTHORITY: "{{ loadbalancer_address | default(groups['kube-master'][0]) }}:2379" run_once: true - name: Calico | Write calico-node systemd init file diff --git a/roles/network_plugin/templates/network-environment.j2 b/roles/network_plugin/templates/network-environment.j2 index 2b89eb7e8..53ab5f15a 100755 --- a/roles/network_plugin/templates/network-environment.j2 +++ b/roles/network_plugin/templates/network-environment.j2 @@ -10,7 +10,7 @@ KUBERNETES_MASTER={{ groups['kube-master'][0] }} # Location of etcd cluster used by Calico. 
By default, this uses the etcd # instance running on the Kubernetes Master -ETCD_AUTHORITY={{ groups['kube-master'][0] }}:2379 +ETCD_AUTHORITY={{ loadbalancer_address | default(groups['kube-master'][0]) }}:2379 # The kubernetes-apiserver location - used by the calico plugin KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_apiserver_insecure_port}}/api/v1/ From af9b9458747b712a30e36364e2b4f81b54e30be2 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 19:09:19 +0100 Subject: [PATCH 16/34] add the loadbalancer address to ssl certs --- roles/kubernetes/node/templates/openssl.conf.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2 index 05015651f..ce2b88e9c 100644 --- a/roles/kubernetes/node/templates/openssl.conf.j2 +++ b/roles/kubernetes/node/templates/openssl.conf.j2 @@ -15,7 +15,7 @@ IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_ {% endfor %} {% set idx = groups['kube-master'] | length | int + 1 %} IP.{{ idx | string }} = {{ kube_apiserver_ip }} -{% if kube_loadbalancer_ip is defined | default('') %} +{% if loadbalancer_address is defined | default('') %} {% set idx = idx | int + 1 %} -IP.{{ idx | string }} = {{ kube_loadbalancer }} +IP.{{ idx | string }} = {{ loadbalancer_address }} {% endif %} From 0a1b92f34829cd926def188d1965b6bf3626286f Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 19:11:09 +0100 Subject: [PATCH 17/34] cluster log level variable 'kube_log_level' --- roles/kubernetes/node/templates/config.j2 | 2 +- roles/kubernetes/node/templates/kubelet.j2 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/kubernetes/node/templates/config.j2 b/roles/kubernetes/node/templates/config.j2 index f68dffb3d..03752e1c9 100644 --- a/roles/kubernetes/node/templates/config.j2 +++ b/roles/kubernetes/node/templates/config.j2 @@ -17,7 +17,7 @@ 
KUBE_LOGTOSTDERR="--logtostderr=true" # journal message level, 0 is debug -KUBE_LOG_LEVEL="{{ kube_log_level | default('--v=2') }}" +KUBE_LOG_LEVEL="--v={{ kube_log_level | default('2') }}" # Should this cluster be allowed to run privileged docker containers KUBE_ALLOW_PRIV="--allow_privileged=true" diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index b062a055a..0db8483d1 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -1,6 +1,6 @@ [Service] Environment="KUBE_LOGTOSTDERR=--logtostderr=true" -Environment="KUBE_LOG_LEVEL={{ kube_log_level | default('--v=2') }}" +Environment="KUBE_LOG_LEVEL=--v={{ kube_log_level | default('2') }}" Environment="KUBE_ALLOW_PRIV=--allow_privileged=true" {% if inventory_hostname in groups['kube-master'] %} Environment="KUBELET_API_SERVER=--api_servers=http://{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:{{ kube_apiserver_insecure_port }}" From 59994a6df113614d35d869ff00a5aaed7a1110ef Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sat, 12 Dec 2015 19:32:18 +0100 Subject: [PATCH 18/34] Quickstart documentation --- README.md | 35 ++++++++++++++-- .../{production => test}/group_vars/all.yml | 41 ++++++++++--------- environments/test/inventory.example | 34 +++++++++++++++ roles/kubernetes/node/defaults/main.yml | 6 +-- 4 files changed, 91 insertions(+), 25 deletions(-) rename environments/{production => test}/group_vars/all.yml (75%) create mode 100644 environments/test/inventory.example diff --git a/README.md b/README.md index e3fae11cb..20ba6a65a 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,33 @@ Ansible v1.9.x * [flanneld](https://github.com/coreos/flannel/releases) v0.5.5 * [docker](https://www.docker.com/) v1.9.1 +Quickstart +------------------------- +The following steps will quickly setup a kubernetes cluster with default configuration. 
+These defaults are good for a test purposes. + +Edit the inventory according to the number of servers +``` +[downloader] +10.115.99.1 + +[kube-master] +10.115.99.31 + +[kube-node] +10.115.99.32 +10.115.99.33 + +[k8s-cluster:children] +kube-node +kube-master +``` + +Run the playbook +``` +ansible-playbook -i environments/production/inventory cluster.yml -u root +``` + Ansible ------------------------- @@ -44,11 +71,10 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes [kube-master] 10.99.0.26 - -[etcd] -10.99.0.26 +10.99.0.59 [kube-node] +10.99.0.59 10.99.0.4 10.99.0.5 10.99.0.36 @@ -60,6 +86,7 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes 10.99.0.5 local_as=xxxxxxxx [usa] +10.99.0.59 local_as=xxxxxxxx 10.99.0.36 local_as=xxxxxxxx 10.99.0.37 local_as=xxxxxxxx @@ -69,9 +96,11 @@ kube-master [paris:vars] peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}] +loadbalancer_address="10.99.0.24" [usa:vars] peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}] +loadbalancer_address="10.99.0.44" ``` ### Playbook diff --git a/environments/production/group_vars/all.yml b/environments/test/group_vars/all.yml similarity index 75% rename from environments/production/group_vars/all.yml rename to environments/test/group_vars/all.yml index 581fa1434..5dc380898 100644 --- a/environments/production/group_vars/all.yml +++ b/environments/test/group_vars/all.yml @@ -5,28 +5,31 @@ bin_dir: /usr/local/bin # Note: ensure that you've enough disk space (about 1G) local_release_dir: "/tmp/releases" +# Cluster Loglevel configuration +kube_log_level: 2 + # Users to create for basic auth in Kubernetes API via HTTP -# kube_users: -# kube: -# pass: changeme -# role: admin +kube_users: + kube: + pass: changeme + role: admin # root: # pass: changeme # role: admin # Kubernetes cluster name, also will be used as DNS domain -# cluster_name: 
cluster.local +cluster_name: cluster.local # set this variable to calico if needed. keep it empty if flannel is used -# kube_network_plugin: calico +kube_network_plugin: calico # Kubernetes internal network for services, unused block of space. -# kube_service_addresses: 10.233.0.0/18 +kube_service_addresses: 10.233.0.0/18 # internal network. When used, it will assign IP # addresses from this range to individual pods. # This network must be unused in your network infrastructure! -# kube_pods_subnet: 10.233.64.0/18 +kube_pods_subnet: 10.233.64.0/18 # internal network total size (optional). This is the prefix of the # entire network. Must be unused in your environment. @@ -35,17 +38,17 @@ local_release_dir: "/tmp/releases" # internal network node size allocation (optional). This is the size allocated # to each node on your network. With these defaults you should have # room for 4096 nodes with 254 pods per node. -# kube_network_node_prefix: 24 +kube_network_node_prefix: 24 # With calico it is possible to distributed routes with border routers of the datacenter. -# peer_with_router: false +peer_with_router: false # Warning : enabling router peering will disable calico's default behavior ('node mesh'). # The subnets of each nodes will be distributed by the datacenter router # The port the API Server will be listening on. -# kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" -# kube_apiserver_port: 443 # (https) -# kube_apiserver_insecure_port: 8080 # (http) +kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}" +kube_apiserver_port: 443 # (https) +kube_apiserver_insecure_port: 8080 # (http) # Internal DNS configuration. # Kubernetes can create and mainatain its own DNS server to resolve service names @@ -56,13 +59,13 @@ local_release_dir: "/tmp/releases" # Kubernetes won't do this for you (yet). 
# Upstream dns servers used by dnsmasq -# upstream_dns_servers: -# - 8.8.8.8 -# - 4.4.8.8 +upstream_dns_servers: + - 8.8.8.8 + - 4.4.8.8 # # # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md -# dns_setup: true -# dns_domain: "{{ cluster_name }}" +dns_setup: true +dns_domain: "{{ cluster_name }}" # # # Ip address of the kubernetes dns service -# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" +dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" diff --git a/environments/test/inventory.example b/environments/test/inventory.example new file mode 100644 index 000000000..35730d191 --- /dev/null +++ b/environments/test/inventory.example @@ -0,0 +1,34 @@ +[downloader] +10.99.0.26 + +[kube-master] +10.99.0.26 +10.99.0.27 + +[kube-node] +10.99.0.27 +10.99.0.4 +10.99.0.5 +10.99.0.36 +10.99.0.37 + +[paris] +10.99.0.26 +10.99.0.4 local_as=xxxxxxxx +10.99.0.5 local_as=xxxxxxxx + +[usa] +10.99.0.36 local_as=xxxxxxxx +10.99.0.37 local_as=xxxxxxxx + +[k8s-cluster:children] +kube-node +kube-master + +[paris:vars] +peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}] +loadbalancer_address="10.99.0.24" + +[usa:vars] +peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}] +loadbalancer_address="10.99.0.44" diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index d48d72b66..209a2420f 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -43,6 +43,6 @@ hyperkube_image: # pick the 10th ip address in the kube_service_addresses range and use that. 
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" -# kube_api_runtime_config: -# - extensions/v1beta1/daemonsets=true -# - extensions/v1beta1/deployments=true +kube_api_runtime_config: + - extensions/v1beta1/daemonsets=true + - extensions/v1beta1/deployments=true From 9862afb0975defce5734874bdabf20dc3f20f205 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sun, 13 Dec 2015 16:41:18 +0100 Subject: [PATCH 19/34] Upgrade kubernetes to v1.1.3 --- README.md | 2 +- roles/download/defaults/main.yml | 6 +++--- roles/kubernetes/master/tasks/main.yml | 1 + roles/kubernetes/node/defaults/main.yml | 6 ++++-- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 20ba6a65a..62fecf202 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ The firewalls are not managed, you'll need to implement your own rules the way y Ansible v1.9.x ### Components -* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.2 +* [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.1.3 * [etcd](https://github.com/coreos/etcd/releases) v2.2.2 * [calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.12.0 * [flanneld](https://github.com/coreos/flannel/releases) v0.5.5 diff --git a/roles/download/defaults/main.yml b/roles/download/defaults/main.yml index d21b43752..a4739f0fc 100644 --- a/roles/download/defaults/main.yml +++ b/roles/download/defaults/main.yml @@ -2,9 +2,9 @@ etcd_version: v2.2.2 flannel_version: 0.5.5 -kube_version: v1.1.2 -kubectl_checksum: "e0585c6e63f796d87b34cd1f16554892a49421b98a2862a896b2b7ebf1439ace" -kubelet_checksum: "6c029d34888e1ec4b9ab4b500b0712388984340488c5f3c19e2c759d1003cbff" +kube_version: v1.1.3 +kubectl_checksum: "01b9bea18061a27b1cf30e34fd8ab45cfc096c9a9d57d0ed21072abb40dd3d1d" +kubelet_checksum: "62191c66f2d670dd52ddf1d88ef81048977abf1ffaa95ee6333299447eb6a482" calico_version: v0.12.0 diff --git a/roles/kubernetes/master/tasks/main.yml 
b/roles/kubernetes/master/tasks/main.yml index 12459956a..f6955f689 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -48,6 +48,7 @@ - name: wait for the apiserver to be running (pulling image and running container) wait_for: port: 8080 + delay: 10 - name: install required python module 'httplib2' apt: diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index 209a2420f..fdea54b40 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -33,8 +33,10 @@ dns_domain: "{{ cluster_name }}" kube_proxy_mode: userspace hyperkube_image: - name: gcr.io/google_containers/hyperkube - tag: v1.1.2 +# Temporary image, waiting for official google release +# name: gcr.io/google_containers/hyperkube + name: quay.io/smana/hyperkube + tag: v1.1.3 # IP address of the DNS server. # Kubernetes will create a pod with several containers, serving as the DNS From f90830973904a11444394b689546fa53082ac3d9 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Sun, 13 Dec 2015 16:59:22 +0100 Subject: [PATCH 20/34] update README with multi-master notes --- README.md | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 62fecf202..779d6a2d9 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,9 @@ Based on [CiscoCloud](https://github.com/CiscoCloud/kubernetes-ansible) work. ### Requirements Tested on **Debian Jessie** and **Ubuntu** (14.10, 15.04, 15.10). -The target servers must have access to the Internet in order to pull docker imaqes. -The firewalls are not managed, you'll need to implement your own rules the way you used to. +* The target servers must have access to the Internet in order to pull docker imaqes. +* The firewalls are not managed, you'll need to implement your own rules the way you used to. 
+* the following packages are required: openssl, curl, dnsmasq, python-httplib2 on remote servers and python-ipaddr on deployment machine. Ansible v1.9.x @@ -21,7 +22,7 @@ Ansible v1.9.x Quickstart ------------------------- The following steps will quickly setup a kubernetes cluster with default configuration. -These defaults are good for a test purposes. +These defaults are good for tests purposes. Edit the inventory according to the number of servers ``` @@ -45,6 +46,8 @@ Run the playbook ansible-playbook -i environments/production/inventory cluster.yml -u root ``` +You can jump directly to "*Available apps, installation procedure*" + Ansible ------------------------- @@ -111,20 +114,25 @@ loadbalancer_address="10.99.0.44" roles: - { role: download, tags: download } -- hosts: k8s-cluster +# etcd must be running on master(s) before going on +- hosts: kube-master roles: - { role: etcd, tags: etcd } + +- hosts: k8s-cluster + roles: - { role: docker, tags: docker } - - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } - { role: dnsmasq, tags: dnsmasq } + - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } + +- hosts: kube-node + roles: + - { role: kubernetes/node, tags: node } - hosts: kube-master roles: - { role: kubernetes/master, tags: master } -- hosts: kube-node - roles: - - { role: kubernetes/node, tags: node } ``` ### Run @@ -136,6 +144,14 @@ ansible-playbook -i environments/dev/inventory cluster.yml -u root Kubernetes ------------------------- +### Multi master notes +* You can choose where to install the master components. If you want your master node to act both as master (api,scheduler,controller) and node (e.g. accept workloads, create pods ...), +the server address has to be present on both groups 'kube-master' and 'kube-node'. + +* Almost all kubernetes components are running into pods except *kubelet*. 
These pods are managed by kubelet which ensure they're always running + +* One etcd cluster member per node will be configured. For safety reasons, you should have at least two master nodes. + ### Network Overlay You can choose between 2 network plugins. Only one must be chosen. From 5efc09710b2e4af79af501d64489a9937721c66f Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Mon, 14 Dec 2015 09:54:58 +0100 Subject: [PATCH 21/34] Renaming hyperkube image vars --- .../master/templates/manifests/kube-apiserver.manifest.j2 | 2 +- .../manifests/kube-controller-manager.manifest.j2 | 2 +- .../master/templates/manifests/kube-scheduler.manifest.j2 | 2 +- roles/kubernetes/node/defaults/main.yml | 7 +++---- .../node/templates/manifests/kube-proxy.manifest.j2 | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 8ada4bc3e..666d61759 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -6,7 +6,7 @@ spec: hostNetwork: true containers: - name: kube-apiserver - image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} command: - /hyperkube - apiserver diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 17052f9f4..1b98ecbc8 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: kube-controller-manager - image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} command: - 
/hyperkube - controller-manager diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 7a595f2c6..0a0efdcb0 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: kube-scheduler - image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} command: - /hyperkube - scheduler diff --git a/roles/kubernetes/node/defaults/main.yml b/roles/kubernetes/node/defaults/main.yml index fdea54b40..a098e0c11 100644 --- a/roles/kubernetes/node/defaults/main.yml +++ b/roles/kubernetes/node/defaults/main.yml @@ -32,11 +32,10 @@ dns_domain: "{{ cluster_name }}" kube_proxy_mode: userspace -hyperkube_image: # Temporary image, waiting for official google release -# name: gcr.io/google_containers/hyperkube - name: quay.io/smana/hyperkube - tag: v1.1.3 +# hyperkube_image_repo: gcr.io/google_containers/hyperkube +hyperkube_image_repo: quay.io/smana/hyperkube +hyperkube_image_tag: v1.1.3 # IP address of the DNS server. 
# Kubernetes will create a pod with several containers, serving as the DNS diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 5d8aef5c0..32c52e7d9 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -7,7 +7,7 @@ spec: hostNetwork: true containers: - name: kube-proxy - image: {{ hyperkube_image.name }}:{{ hyperkube_image.tag }} + image: {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} command: - /hyperkube - proxy From 2fc8b46996d88f389154c2bf11244b7e99ca54fe Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Mon, 14 Dec 2015 10:39:13 +0100 Subject: [PATCH 22/34] etcd can run on a distinct cluster --- README.md | 10 +++++++++- cluster.yml | 2 +- .../templates/manifests/kube-apiserver.manifest.j2 | 3 ++- .../templates/manifests/kube-podmaster.manifest.j2 | 6 ++++-- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 779d6a2d9..fd8f41668 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,10 @@ Edit the inventory according to the number of servers [kube-master] 10.115.99.31 +[etcd] +10.115.99.31 +10.115.99.32 + [kube-node] 10.115.99.32 10.115.99.33 @@ -76,6 +80,10 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes 10.99.0.26 10.99.0.59 +[etcd] +10.99.0.26 +10.99.0.59 + [kube-node] 10.99.0.59 10.99.0.4 @@ -115,7 +123,7 @@ loadbalancer_address="10.99.0.44" - { role: download, tags: download } # etcd must be running on master(s) before going on -- hosts: kube-master +- hosts: etcd roles: - { role: etcd, tags: etcd } diff --git a/cluster.yml b/cluster.yml index 8e0792a2b..d411f381c 100644 --- a/cluster.yml +++ b/cluster.yml @@ -5,7 +5,7 @@ - { role: download, tags: download } # etcd must be running on master(s) before going on -- hosts: kube-master +- hosts: etcd roles: - { role: etcd, tags: etcd } 
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 666d61759..37f65357b 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -11,7 +11,8 @@ spec: - /hyperkube - apiserver - --insecure-bind-address=0.0.0.0 - - --etcd-servers=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379 + - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} + - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota - --service-cluster-ip-range={{ kube_service_addresses }} - --client-ca-file={{ kube_cert_dir }}/ca.pem diff --git a/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 index a75fa3b32..86447badf 100644 --- a/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-podmaster.manifest.j2 @@ -10,7 +10,8 @@ spec: image: gcr.io/google_containers/podmaster:1.1 command: - /podmaster - - --etcd-servers=http://127.0.0.1:2379 + - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} + - --key=scheduler - --source-file={{ kube_config_dir}}/kube-scheduler.manifest - --dest-file={{ kube_manifest_dir }}/kube-scheduler.manifest @@ -24,7 +25,8 @@ spec: image: gcr.io/google_containers/podmaster:1.1 command: - /podmaster - - --etcd-servers=http://127.0.0.1:2379 + - --etcd-servers={% for srv in groups['etcd'] %}http://{{ srv }}:2379{% if not loop.last %},{% endif %}{% endfor %} + - --key=controller - --source-file={{ kube_config_dir }}/kube-controller-manager.manifest - --dest-file={{ kube_manifest_dir 
}}/kube-controller-manager.manifest From e2984b4fdb7d69571346663c965bc1686a050493 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 11:49:11 +0100 Subject: [PATCH 23/34] ha etcd with calico --- cluster.yml | 6 +----- roles/etcd/handlers/main.yml | 19 +++++++++---------- roles/etcd/tasks/configure.yml | 11 +++++++++-- roles/etcd/tasks/install.yml | 10 +--------- roles/etcd/templates/etcd2.j2 | 11 ++++++++--- roles/etcd/templates/systemd-etcd2.service.j2 | 4 ++++ roles/network_plugin/tasks/calico.yml | 2 +- .../templates/network-environment.j2 | 8 +++++--- 8 files changed, 38 insertions(+), 33 deletions(-) diff --git a/cluster.yml b/cluster.yml index d411f381c..ef91f27ac 100644 --- a/cluster.yml +++ b/cluster.yml @@ -4,13 +4,9 @@ roles: - { role: download, tags: download } -# etcd must be running on master(s) before going on -- hosts: etcd - roles: - - { role: etcd, tags: etcd } - - hosts: k8s-cluster roles: + - { role: etcd, tags: etcd } - { role: docker, tags: docker } - { role: dnsmasq, tags: dnsmasq } - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } diff --git a/roles/etcd/handlers/main.yml b/roles/etcd/handlers/main.yml index af2442abf..67334a353 100644 --- a/roles/etcd/handlers/main.yml +++ b/roles/etcd/handlers/main.yml @@ -1,15 +1,14 @@ --- -- name: restart daemons - command: /bin/true - notify: - - reload systemd - - restart etcd2 - - name: reload systemd command: systemctl daemon-reload -- name: restart etcd2 - service: name=etcd2 state=restarted +- name: restart reloaded-etcd2 + service: + name: etcd2 + state: restarted -- name: Save iptables rules - command: service iptables save +- name: restart etcd2 + command: /bin/true + notify: + - reload systemd + - restart reloaded-etcd2 diff --git a/roles/etcd/tasks/configure.yml b/roles/etcd/tasks/configure.yml index 18a2cc882..5aea90f44 100644 --- a/roles/etcd/tasks/configure.yml +++ b/roles/etcd/tasks/configure.yml @@ -1,11 +1,18 @@ --- +- name: Copy etcd2.service systemd 
file + template: + src: systemd-etcd2.service.j2 + dest: /lib/systemd/system/etcd2.service + backup: yes + notify: + - restart etcd2 + - name: Create etcd2 environment vars dir file: path=/etc/systemd/system/etcd2.service.d state=directory - name: Write etcd2 config file - template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf backup=yes + template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2.conf backup=yes notify: - - reload systemd - restart etcd2 - name: Ensure etcd2 is running diff --git a/roles/etcd/tasks/install.yml b/roles/etcd/tasks/install.yml index b500d88ed..f02dc93db 100644 --- a/roles/etcd/tasks/install.yml +++ b/roles/etcd/tasks/install.yml @@ -11,15 +11,7 @@ with_items: - etcdctl - etcd - notify: - - restart daemons + notify: restart etcd2 - name: Create etcd2 binary symlink file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link - -- name: Copy etcd2.service systemd file - template: - src: systemd-etcd2.service.j2 - dest: /lib/systemd/system/etcd2.service - backup: yes - notify: restart daemons diff --git a/roles/etcd/templates/etcd2.j2 b/roles/etcd/templates/etcd2.j2 index cb3305287..a00fb72e2 100644 --- a/roles/etcd/templates/etcd2.j2 +++ b/roles/etcd/templates/etcd2.j2 @@ -1,16 +1,21 @@ # etcd2.0 +[Service] +{% if inventory_hostname in groups['etcd'] %} {% set etcd = {} %} -{% for srv in groups['kube-master'] %} +{% for srv in groups['etcd'] %} {% if inventory_hostname == srv %} {% set _dummy = etcd.update({'name':"master"+loop.index|string}) %} {% endif %} {% endfor %} -[Service] Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379" Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380" -Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['kube-master'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not 
loop.last %},{% endif %}{% endfor %}" +Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['etcd'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not loop.last %},{% endif %}{% endfor %}" Environment="ETCD_INITIAL_CLUSTER_STATE=new" Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd" Environment="ETCD_LISTEN_CLIENT_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2379,http://127.0.0.1:2379" Environment="ETCD_LISTEN_PEER_URLS=http://{{ hostvars[inventory_hostname]['ip'] | default( ansible_default_ipv4.address) }}:2380" Environment="ETCD_NAME={{ etcd.name }}" +{% else %} +Environment="ETCD_INITIAL_CLUSTER={% for srv in groups['etcd'] %}master{{ loop.index|string }}=http://{{ srv }}:2380{% if not loop.last %},{% endif %}{% endfor %}" +Environment="ETCD_LISTEN_CLIENT_URLS=http://127.0.0.1:23799" +{% endif %} diff --git a/roles/etcd/templates/systemd-etcd2.service.j2 b/roles/etcd/templates/systemd-etcd2.service.j2 index 26cda24eb..84a527d04 100644 --- a/roles/etcd/templates/systemd-etcd2.service.j2 +++ b/roles/etcd/templates/systemd-etcd2.service.j2 @@ -6,7 +6,11 @@ Conflicts=etcd.service User=etcd Environment=ETCD_DATA_DIR=/var/lib/etcd2 Environment=ETCD_NAME=%m +{% if inventory_hostname in groups['etcd'] %} ExecStart={{ bin_dir }}/etcd2 +{% else %} +ExecStart={{ bin_dir }}/etcd2 -proxy on +{% endif %} Restart=always RestartSec=10s LimitNOFILE=40000 diff --git a/roles/network_plugin/tasks/calico.yml b/roles/network_plugin/tasks/calico.yml index c507d66e1..eba8967d1 100644 --- a/roles/network_plugin/tasks/calico.yml +++ b/roles/network_plugin/tasks/calico.yml @@ -12,7 +12,7 @@ - name: Calico | Configure calico-node desired pool shell: calicoctl pool add {{ kube_pods_subnet }} environment: - ETCD_AUTHORITY: "{{ loadbalancer_address | default(groups['kube-master'][0]) }}:2379" + ETCD_AUTHORITY: "{{ groups['etcd'][0] }}:2379" run_once: true - name: Calico | Write calico-node systemd init file diff --git 
a/roles/network_plugin/templates/network-environment.j2 b/roles/network_plugin/templates/network-environment.j2 index 53ab5f15a..5793e8818 100755 --- a/roles/network_plugin/templates/network-environment.j2 +++ b/roles/network_plugin/templates/network-environment.j2 @@ -4,20 +4,22 @@ CALICO_IPAM=true DEFAULT_IPV4={{ip | default(ansible_default_ipv4.address) }} -{% if inventory_hostname in groups['kube-node'] %} # The kubernetes master IP KUBERNETES_MASTER={{ groups['kube-master'][0] }} # Location of etcd cluster used by Calico. By default, this uses the etcd # instance running on the Kubernetes Master -ETCD_AUTHORITY={{ loadbalancer_address | default(groups['kube-master'][0]) }}:2379 +{% if inventory_hostname in groups['etcd'] %} +ETCD_AUTHORITY="127.0.0.1:2379" +{% else %} +ETCD_AUTHORITY="127.0.0.1:23799" +{% endif %} # The kubernetes-apiserver location - used by the calico plugin KUBE_API_ROOT=http://{{ groups['kube-master'][0] }}:{{kube_apiserver_insecure_port}}/api/v1/ # Location of the calicoctl binary - used by the calico plugin CALICOCTL_PATH="{{ bin_dir }}/calicoctl" -{% endif %} {% else %} FLANNEL_ETCD_PREFIX="--etcd-prefix=/{{ cluster_name }}/network" {% endif %} From 4055980ce696d768ae4237b142cf1dfa01e3dbf0 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 13:14:27 +0100 Subject: [PATCH 24/34] ha apiservers for kubelet --- roles/kubernetes/node/templates/kubelet.j2 | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2 index 0db8483d1..02fce526f 100644 --- a/roles/kubernetes/node/templates/kubelet.j2 +++ b/roles/kubernetes/node/templates/kubelet.j2 @@ -2,11 +2,7 @@ Environment="KUBE_LOGTOSTDERR=--logtostderr=true" Environment="KUBE_LOG_LEVEL=--v={{ kube_log_level | default('2') }}" Environment="KUBE_ALLOW_PRIV=--allow_privileged=true" -{% if inventory_hostname in groups['kube-master'] %} 
-Environment="KUBELET_API_SERVER=--api_servers=http://{{ hostvars[inventory_hostname]['ip'] | default(hostvars[inventory_hostname]['ansible_default_ipv4']['address']) }}:{{ kube_apiserver_insecure_port }}" -{% else %} -Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_apiserver_port }}" -{% endif %} +Environment="KUBELET_API_SERVER=--api_servers={% for srv in groups['kube-master'] %}https://{{ srv }}:{{ kube_apiserver_port }}{% if not loop.last %},{% endif %}{% endfor %}" # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) Environment="KUBELET_ADDRESS=--address=0.0.0.0" # The port for the info server to serve on From 953f482585181bde926f9e9a99cf25ae48919032 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 15:20:08 +0100 Subject: [PATCH 25/34] kube-proxy loadbalancing, need an external loadbalancer --- environments/test/group_vars/all.yml | 15 +++++++++++++++ environments/test/inventory.example | 10 +--------- roles/dnsmasq/tasks/main.yml | 8 ++++++++ .../templates/manifests/kube-proxy.manifest.j2 | 6 +++++- roles/kubernetes/node/templates/openssl.conf.j2 | 5 +---- 5 files changed, 30 insertions(+), 14 deletions(-) diff --git a/environments/test/group_vars/all.yml b/environments/test/group_vars/all.yml index 5dc380898..35ae21a0a 100644 --- a/environments/test/group_vars/all.yml +++ b/environments/test/group_vars/all.yml @@ -69,3 +69,18 @@ dns_domain: "{{ cluster_name }}" # # # Ip address of the kubernetes dns service dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}" + +# For multi masters architecture: +# kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer +# This domain name will be inserted into the /etc/hosts file of all servers +# configurationexample with haproxy : +# lissten kubernetes-apiserver-https +# bind 10.99.0.21:8383 +# option ssl-hello-chk +# mode tcp +# 
timeout client 3h +# timeout server 3h +# server master1 10.99.0.26:443 +# server master2 10.99.0.27:443 +# balance roundrobin +apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" diff --git a/environments/test/inventory.example b/environments/test/inventory.example index 35730d191..a811b084a 100644 --- a/environments/test/inventory.example +++ b/environments/test/inventory.example @@ -17,18 +17,10 @@ 10.99.0.4 local_as=xxxxxxxx 10.99.0.5 local_as=xxxxxxxx -[usa] +[new-york] 10.99.0.36 local_as=xxxxxxxx 10.99.0.37 local_as=xxxxxxxx [k8s-cluster:children] kube-node kube-master - -[paris:vars] -peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}] -loadbalancer_address="10.99.0.24" - -[usa:vars] -peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}] -loadbalancer_address="10.99.0.44" diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index 7ba02c36c..87c056167 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -8,6 +8,14 @@ when: hostvars[item].ansible_default_ipv4.address is defined with_items: groups['all'] +- name: populate kubernetes loadbalancer address into hosts file + lineinfile: + dest: /etc/hosts + regexp: ".*{{ apiserver_loadbalancer_domain_name }}$" + line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local" + state: present + when: loadbalancer_apiserver is defined + - name: clean hosts file lineinfile: dest: /etc/hosts diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 32c52e7d9..923c29764 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -15,7 +15,11 @@ spec: {% if inventory_hostname in groups['kube-master'] %} - --master=http://127.0.0.1:8080 {% else %} - - --master=https://{{ 
groups['kube-master'][0] }}:{{kube_apiserver_port }} +{% if loadbalancer_apiserver.address is defined | default('') %} + - --master=https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port }} +{% else %} + - --master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }} +{% endif%} - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml {% endif %} securityContext: diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2 index ce2b88e9c..3715d97b8 100644 --- a/roles/kubernetes/node/templates/openssl.conf.j2 +++ b/roles/kubernetes/node/templates/openssl.conf.j2 @@ -10,12 +10,9 @@ subjectAltName = @alt_names DNS.1 = kubernetes DNS.2 = kubernetes.default DNS.3 = kubernetes.default.svc.{{ dns_domain }} +DNS.4 = {{ apiserver_loadbalancer_domain_name }} {% for host in groups['kube-master'] %} IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} {% endfor %} {% set idx = groups['kube-master'] | length | int + 1 %} IP.{{ idx | string }} = {{ kube_apiserver_ip }} -{% if loadbalancer_address is defined | default('') %} -{% set idx = idx | int + 1 %} -IP.{{ idx | string }} = {{ loadbalancer_address }} -{% endif %} From 4d1828c724e11da05f3e4364adf11e7ff4aac745 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 15:25:24 +0100 Subject: [PATCH 26/34] group vars per location --- environments/test/group_vars/new-york.yml | 10 ++++++++++ environments/test/group_vars/paris.yml | 10 ++++++++++ 2 files changed, 20 insertions(+) create mode 100644 environments/test/group_vars/new-york.yml create mode 100644 environments/test/group_vars/paris.yml diff --git a/environments/test/group_vars/new-york.yml b/environments/test/group_vars/new-york.yml new file mode 100644 index 000000000..5b51961db --- /dev/null +++ b/environments/test/group_vars/new-york.yml @@ -0,0 +1,10 @@ +--- +peers: + -router_id: "10.99.0.34" + as: "65xxx" + - 
router_id: "10.99.0.35" + as: "65xxx" + +loadbalancer_apiserver: + address: "10.99.0.44" + port: "8383" diff --git a/environments/test/group_vars/paris.yml b/environments/test/group_vars/paris.yml new file mode 100644 index 000000000..052200ba6 --- /dev/null +++ b/environments/test/group_vars/paris.yml @@ -0,0 +1,10 @@ +--- +peers: + -router_id: "10.99.0.2" + as: "65xxx" + - router_id: "10.99.0.3" + as: "65xxx" + +loadbalancer_apiserver: + address: "10.99.0.21" + port: "8383" From 43afd42f596e582908e1aa18403097b10afe72be Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 15:27:12 +0100 Subject: [PATCH 27/34] use 3 members for etcd clustering --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index fd8f41668..c0f380039 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,7 @@ Edit the inventory according to the number of servers [etcd] 10.115.99.31 10.115.99.32 +10.115.99.33 [kube-node] 10.115.99.32 @@ -82,6 +83,7 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes [etcd] 10.99.0.26 +10.99.0.4 10.99.0.59 [kube-node] From f21f660cc5b85f9594df866f0b1c4982d7ae8a14 Mon Sep 17 00:00:00 2001 From: ant31 <2t.antoine@gmail.com> Date: Tue, 15 Dec 2015 16:27:12 +0100 Subject: [PATCH 28/34] Use kube_apiserver_port --- roles/kubernetes/master/tasks/main.yml | 2 +- .../master/templates/manifests/kube-apiserver.manifest.j2 | 1 + .../node/templates/manifests/kube-proxy.manifest.j2 | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/roles/kubernetes/master/tasks/main.yml b/roles/kubernetes/master/tasks/main.yml index f6955f689..6426a0e0d 100644 --- a/roles/kubernetes/master/tasks/main.yml +++ b/roles/kubernetes/master/tasks/main.yml @@ -47,7 +47,7 @@ - name: wait for the apiserver to be running (pulling image and running container) wait_for: - port: 8080 + port: "{{kube_apiserver_insecure_port}}" delay: 10 - name: install required python module 'httplib2' diff --git 
a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 index 37f65357b..940ec1ace 100644 --- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 @@ -21,6 +21,7 @@ spec: - --tls-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --service-account-key-file={{ kube_cert_dir }}/apiserver-key.pem - --secure-port={{ kube_apiserver_port }} + - --insecure-port={{ kube_apiserver_insecure_port }} {% if kube_api_runtime_config is defined %} {% for conf in kube_api_runtime_config %} - --runtime-config={{ conf }} diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 32c52e7d9..e921bc846 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -13,9 +13,9 @@ spec: - proxy - --v={{ kube_log_level | default('2') }} {% if inventory_hostname in groups['kube-master'] %} - - --master=http://127.0.0.1:8080 + - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}} {% else %} - - --master=https://{{ groups['kube-master'][0] }}:{{kube_apiserver_port }} + - --master=https://{{ groups['kube-master'][0] }}:{{kube_apiserver_port }} - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml {% endif %} securityContext: From c91a3183d38e6a13bfd0596cd0e873a9e2376430 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 16:51:55 +0100 Subject: [PATCH 29/34] manage undefined vars for loadbalancing --- roles/dnsmasq/tasks/main.yml | 2 +- .../kubernetes/node/templates/manifests/kube-proxy.manifest.j2 | 2 +- roles/kubernetes/node/templates/openssl.conf.j2 | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/roles/dnsmasq/tasks/main.yml b/roles/dnsmasq/tasks/main.yml index 
87c056167..b3585f47a 100644 --- a/roles/dnsmasq/tasks/main.yml +++ b/roles/dnsmasq/tasks/main.yml @@ -14,7 +14,7 @@ regexp: ".*{{ apiserver_loadbalancer_domain_name }}$" line: "{{ loadbalancer_apiserver.address }} lb-apiserver.kubernetes.local" state: present - when: loadbalancer_apiserver is defined + when: loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined - name: clean hosts file lineinfile: diff --git a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 index 923c29764..3c429ec07 100644 --- a/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 +++ b/roles/kubernetes/node/templates/manifests/kube-proxy.manifest.j2 @@ -15,7 +15,7 @@ spec: {% if inventory_hostname in groups['kube-master'] %} - --master=http://127.0.0.1:8080 {% else %} -{% if loadbalancer_apiserver.address is defined | default('') %} +{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} - --master=https://{{ apiserver_loadbalancer_domain_name }}:{{ loadbalancer_apiserver.port }} {% else %} - --master=https://{{ groups['kube-master'][0] }}:{{ kube_apiserver_port }} diff --git a/roles/kubernetes/node/templates/openssl.conf.j2 b/roles/kubernetes/node/templates/openssl.conf.j2 index 3715d97b8..c594e3337 100644 --- a/roles/kubernetes/node/templates/openssl.conf.j2 +++ b/roles/kubernetes/node/templates/openssl.conf.j2 @@ -10,7 +10,9 @@ subjectAltName = @alt_names DNS.1 = kubernetes DNS.2 = kubernetes.default DNS.3 = kubernetes.default.svc.{{ dns_domain }} +{% if loadbalancer_apiserver is defined and apiserver_loadbalancer_domain_name is defined %} DNS.4 = {{ apiserver_loadbalancer_domain_name }} +{% endif %} {% for host in groups['kube-master'] %} IP.{{ loop.index }} = {{ hostvars[host]['ip'] | default(hostvars[host]['ansible_default_ipv4']['address']) }} {% endfor %} From 9649f2779d1131429211fc00bd92e14559ef5d8d Mon Sep 17 00:00:00 2001 
From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:01:29 +0100 Subject: [PATCH 30/34] Commenting out loadbalancing vars --- environments/test/group_vars/all.yml | 6 +++--- environments/test/group_vars/new-york.yml | 20 ++++++++++---------- environments/test/group_vars/paris.yml | 20 ++++++++++---------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/environments/test/group_vars/all.yml b/environments/test/group_vars/all.yml index 35ae21a0a..41c87a57d 100644 --- a/environments/test/group_vars/all.yml +++ b/environments/test/group_vars/all.yml @@ -73,8 +73,8 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address # For multi masters architecture: # kube-proxy doesn't support multiple apiservers for the time being so you'll need to configure your own loadbalancer # This domain name will be inserted into the /etc/hosts file of all servers -# configurationexample with haproxy : -# lissten kubernetes-apiserver-https +# configuration example with haproxy : +# listen kubernetes-apiserver-https # bind 10.99.0.21:8383 # option ssl-hello-chk # mode tcp @@ -83,4 +83,4 @@ dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address # server master1 10.99.0.26:443 # server master2 10.99.0.27:443 # balance roundrobin -apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" +# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local" diff --git a/environments/test/group_vars/new-york.yml b/environments/test/group_vars/new-york.yml index 5b51961db..ce9d95396 100644 --- a/environments/test/group_vars/new-york.yml +++ b/environments/test/group_vars/new-york.yml @@ -1,10 +1,10 @@ ---- -peers: - -router_id: "10.99.0.34" - as: "65xxx" - - router_id: "10.99.0.35" - as: "65xxx" - -loadbalancer_apiserver: - address: "10.99.0.44" - port: "8383" +#--- +#peers: +# -router_id: "10.99.0.34" +# as: "65xxx" +# - router_id: "10.99.0.35" +# as: "65xxx" +# +#loadbalancer_apiserver: +# address: "10.99.0.44" +# port: 
"8383" diff --git a/environments/test/group_vars/paris.yml b/environments/test/group_vars/paris.yml index 052200ba6..e8b34ae0d 100644 --- a/environments/test/group_vars/paris.yml +++ b/environments/test/group_vars/paris.yml @@ -1,10 +1,10 @@ ---- -peers: - -router_id: "10.99.0.2" - as: "65xxx" - - router_id: "10.99.0.3" - as: "65xxx" - -loadbalancer_apiserver: - address: "10.99.0.21" - port: "8383" +#--- +#peers: +# -router_id: "10.99.0.2" +# as: "65xxx" +# - router_id: "10.99.0.3" +# as: "65xxx" +# +#loadbalancer_apiserver: +# address: "10.99.0.21" +# port: "8383" From f2069b296c6767324ce78ad5e3476f37b5f7ce94 Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:16:19 +0100 Subject: [PATCH 31/34] BGP peering and loadbalancing vars are managed in a group_vars file --- README.md | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index c0f380039..9e810477f 100644 --- a/README.md +++ b/README.md @@ -106,14 +106,6 @@ In node-mesh mode the nodes peers with all the nodes in order to exchange routes [k8s-cluster:children] kube-node kube-master - -[paris:vars] -peers=[{"router_id": "10.99.0.2", "as": "65xxx"}, {"router_id": "10.99.0.3", "as": "65xxx"}] -loadbalancer_address="10.99.0.24" - -[usa:vars] -peers=[{"router_id": "10.99.0.34", "as": "65xxx"}, {"router_id": "10.99.0.35", "as": "65xxx"}] -loadbalancer_address="10.99.0.44" ``` ### Playbook @@ -161,6 +153,9 @@ the server address has to be present on both groups 'kube-master' and 'kube-node * Almost all kubernetes components are running into pods except *kubelet*. These pods are managed by kubelet which ensure they're always running * One etcd cluster member per node will be configured. For safety reasons, you should have at least two master nodes. + +* Kube-proxy doesn't support multiple apiservers on startup ([#18174]('https://github.com/kubernetes/kubernetes/issues/18174')). An external loadbalancer needs to be configured. 
+In order to do so, some variables have to be used '**loadbalancer_apiserver**' and '**apiserver_loadbalancer_domain_name**' ### Network Overlay From 61bb6468ef8acfd49169302a2883fff074646afc Mon Sep 17 00:00:00 2001 From: Smaine Kahlouch Date: Tue, 15 Dec 2015 17:24:37 +0100 Subject: [PATCH 32/34] Update README, cluster.yml --- README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 9e810477f..89c4771c7 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ kube-master Run the playbook ``` -ansible-playbook -i environments/production/inventory cluster.yml -u root +ansible-playbook -i environments/test/inventory cluster.yml -u root ``` You can jump directly to "*Available apps, installation procedure*" @@ -59,7 +59,7 @@ Ansible ### Download binaries A role allows to download required binaries. They will be stored in a directory defined by the variable **'local_release_dir'** (by default /tmp). -Please ensure that you have enough disk space there (about **1G**). +Please ensure that you have enough disk space there (about **300M**). **Note**: Whenever you'll need to change the version of a software, you'll have to erase the content of this directory. 
@@ -116,13 +116,9 @@ kube-master roles: - { role: download, tags: download } -# etcd must be running on master(s) before going on -- hosts: etcd - roles: - - { role: etcd, tags: etcd } - - hosts: k8s-cluster roles: + - { role: etcd, tags: etcd } - { role: docker, tags: docker } - { role: dnsmasq, tags: dnsmasq } - { role: network_plugin, tags: ['calico', 'flannel', 'network'] } From 958c770befecf2d7dd952d09e1b60e382599481f Mon Sep 17 00:00:00 2001 From: ant31 <2t.antoine@gmail.com> Date: Wed, 16 Dec 2015 17:43:26 +0100 Subject: [PATCH 33/34] Update ports --- apps.yml | 10 +++++----- roles/apps/k8s-common | 2 +- roles/apps/k8s-heapster | 2 +- .../manifests/kube-controller-manager.manifest.j2 | 2 +- .../templates/manifests/kube-scheduler.manifest.j2 | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/apps.yml b/apps.yml index 1b25d98e7..64d3f2dbb 100644 --- a/apps.yml +++ b/apps.yml @@ -2,7 +2,7 @@ - hosts: kube-master roles: # System - - { role: apps/k8s-kubedns, tags: 'kubedns' } + - { role: apps/k8s-kubedns, tags: ['kubedns', 'kube-system'] } # Databases - { role: apps/k8s-postgres, tags: 'postgres' } @@ -14,13 +14,13 @@ - { role: apps/k8s-rabbitmq, tags: 'rabbitmq' } # Monitoring - - { role: apps/k8s-influxdb, tags: 'influxdb'} - - { role: apps/k8s-heapster, tags: 'heapster'} - - { role: apps/k8s-kubedash, tags: 'kubedash'} + - { role: apps/k8s-influxdb, tags: ['influxdb', 'kube-system']} + - { role: apps/k8s-heapster, tags: ['heapster', 'kube-system']} + - { role: apps/k8s-kubedash, tags: ['kubedash', 'kube-system']} # logging - { role: apps/k8s-kube-logstash, tags: 'kube-logstash'} # Console - { role: apps/k8s-fabric8, tags: 'fabric8' } - - { role: apps/k8s-kube-ui, tags: 'kube-ui' } + - { role: apps/k8s-kube-ui, tags: ['kube-ui', 'kube-system']} diff --git a/roles/apps/k8s-common b/roles/apps/k8s-common index 2ef9669d6..eaab0692e 160000 --- a/roles/apps/k8s-common +++ b/roles/apps/k8s-common @@ -1 +1 @@ -Subproject commit 
2ef9669d6d78e81c4e6de75cce239cabc99f00ad +Subproject commit eaab0692ed375420e183d18392ce79a4c6ed2069 diff --git a/roles/apps/k8s-heapster b/roles/apps/k8s-heapster index dc088e25e..fbd3ff4af 160000 --- a/roles/apps/k8s-heapster +++ b/roles/apps/k8s-heapster @@ -1 +1 @@ -Subproject commit dc088e25efcd040e127543b861448aa0d219eac9 +Subproject commit fbd3ff4afc2fbf4a5fd0dad11a5f19ba11f0b7df diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 index 1b98ecbc8..44e52f7c0 100644 --- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 @@ -11,7 +11,7 @@ spec: command: - /hyperkube - controller-manager - - --master=http://127.0.0.1:8080 + - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}} - --service-account-private-key-file={{ kube_cert_dir }}/apiserver-key.pem - --root-ca-file={{ kube_cert_dir }}/ca.pem - --v={{ kube_log_level | default('2') }} diff --git a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 index 0a0efdcb0..6360dcc54 100644 --- a/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 +++ b/roles/kubernetes/master/templates/manifests/kube-scheduler.manifest.j2 @@ -11,7 +11,7 @@ spec: command: - /hyperkube - scheduler - - --master=http://127.0.0.1:8080 + - --master=http://127.0.0.1:{{kube_apiserver_insecure_port}} - --v={{ kube_log_level | default('2') }} livenessProbe: httpGet: From 44ac355aa7ad0c01760d2876b182177366fb1416 Mon Sep 17 00:00:00 2001 From: ant31 <2t.antoine@gmail.com> Date: Wed, 16 Dec 2015 18:01:52 +0100 Subject: [PATCH 34/34] Update depedencies --- roles/apps/k8s-common | 2 +- roles/apps/k8s-etcd | 2 +- roles/apps/k8s-heapster | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff 
--git a/roles/apps/k8s-common b/roles/apps/k8s-common index eaab0692e..c69c5f881 160000 --- a/roles/apps/k8s-common +++ b/roles/apps/k8s-common @@ -1 +1 @@ -Subproject commit eaab0692ed375420e183d18392ce79a4c6ed2069 +Subproject commit c69c5f881fe414f6856f811b9bb40cd19bcf83f4 diff --git a/roles/apps/k8s-etcd b/roles/apps/k8s-etcd index e3e574ea2..abd61ee91 160000 --- a/roles/apps/k8s-etcd +++ b/roles/apps/k8s-etcd @@ -1 +1 @@ -Subproject commit e3e574ea25ef4b1db79cc20b6dd31efa8a7d87cb +Subproject commit abd61ee91ae729e7b79ecd56d6bb4eed0ddbe604 diff --git a/roles/apps/k8s-heapster b/roles/apps/k8s-heapster index fbd3ff4af..44a6519bf 160000 --- a/roles/apps/k8s-heapster +++ b/roles/apps/k8s-heapster @@ -1 +1 @@ -Subproject commit fbd3ff4afc2fbf4a5fd0dad11a5f19ba11f0b7df +Subproject commit 44a6519bf8957bff316d3e3bc857d554f69c4016