Merge pull request #727 from bogdando/standalone_kubelet

Address standalone kubelet config case
Authored by Antoine Legrand on 2016-12-13 18:15:25 +01:00, committed by GitHub
commit 2795f9a612
35 changed files with 92 additions and 150 deletions
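The "standalone kubelet" case addressed here is a host that is only a control-plane member: listed in the kube-master group but not in kube-node, so its kubelet only runs the static control-plane manifests, is not registered as a schedulable node, and does not need the node kubeconfig. A minimal inventory sketch of that layout (host names are hypothetical):

[kube-master]
master1          # standalone: kubelet only runs the static control-plane pods

[kube-node]
node1
node2

The diff below makes the roles honor this case by moving the config-directory and namespace settings into group-level defaults (kube_config_dir, kube_manifest_dir, system_namespace) and by deriving a standalone_kubelet fact that the kubelet.env template checks.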

View file

@@ -1,6 +1,6 @@
 ---
 - name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
-  template: src={{item.file}} dest=/etc/kubernetes/{{item.dest}}
+  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
   with_items:
     - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
     - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
@@ -13,7 +13,7 @@
     namespace: default
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
-    filename: "/etc/kubernetes/{{item.item.dest}}"
+    filename: "{{kube_config_dir}}/{{item.item.dest}}"
     state: "{{item.changed | ternary('latest','present') }}"
   with_items: "{{ gluster_pv.results }}"
   when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined

View file

@@ -4,6 +4,28 @@ bootstrap_os: none
 # Directory where the binaries will be installed
 bin_dir: /usr/local/bin
+# Kubernetes configuration dirs and system namespace.
+# Those are where all the additional config stuff goes
+# the kubernetes normally puts in /srv/kubernets.
+# This puts them in a sane location and namespace.
+# Editting those values will almost surely break something.
+kube_config_dir: /etc/kubernetes
+kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
+kube_manifest_dir: "{{ kube_config_dir }}/manifests"
+system_namespace: kube-system
+# Logging directory (sysvinit systems)
+kube_log_dir: "/var/log/kubernetes"
+# This is where all the cert scripts and certs will be located
+kube_cert_dir: "{{ kube_config_dir }}/ssl"
+# This is where all of the bearer tokens will be stored
+kube_token_dir: "{{ kube_config_dir }}/tokens"
+# This is where to save basic auth file
+kube_users_dir: "{{ kube_config_dir }}/users"
 ## Change this to use another Kubernetes version, e.g. a current beta release
 kube_version: 1.4.6
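With these stock defaults the derived paths resolve as follows; a quick reference of the rendered values, assuming bin_dir and kube_config_dir are left untouched:

# Rendered values with the defaults above (illustrative only):
kube_config_dir:   /etc/kubernetes
kube_script_dir:   /usr/local/bin/kubernetes-scripts    # "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: /etc/kubernetes/manifests            # "{{ kube_config_dir }}/manifests"
kube_cert_dir:     /etc/kubernetes/ssl
kube_token_dir:    /etc/kubernetes/tokens
kube_users_dir:    /etc/kubernetes/users
system_namespace:  kube-system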

View file

@@ -34,7 +34,7 @@
     state: link
 - name: Create dnsmasq manifests
-  template: src={{item.file}} dest=/etc/kubernetes/{{item.file}}
+  template: src={{item.file}} dest={{kube_config_dir}}/{{item.file}}
   with_items:
     - {file: dnsmasq-ds.yml, type: ds}
     - {file: dnsmasq-svc.yml, type: svc}
@@ -44,10 +44,10 @@
 - name: Start Resources
   kube:
     name: dnsmasq
-    namespace: kube-system
+    namespace: "{{system_namespace}}"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
-    filename: /etc/kubernetes/{{item.item.file}}
+    filename: "{{kube_config_dir}}/{{item.item.file}}"
     state: "{{item.changed | ternary('latest','present') }}"
   with_items: "{{ manifests.results }}"
   when: inventory_hostname == groups['kube-master'][0]

View file

@@ -3,7 +3,7 @@ apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
   name: dnsmasq
-  namespace: kube-system
+  namespace: "{{system_namespace}}"
   labels:
     k8s-app: dnsmasq
 spec:

View file

@@ -6,7 +6,7 @@ metadata:
     kubernetes.io/cluster-service: 'true'
     k8s-app: dnsmasq
   name: dnsmasq
-  namespace: kube-system
+  namespace: {{system_namespace}}
 spec:
   ports:
     - port: 53

View file

@@ -1,6 +1,3 @@
-kube_config_dir: /etc/kubernetes
-kube_namespace: kube-system
 # Versions
 kubedns_version: 1.9
 kubednsmasq_version: 1.3

View file

@@ -8,6 +8,6 @@
     name: "calico-policy-controller"
     kubectl: "{{bin_dir}}/kubectl"
     filename: "{{kube_config_dir}}/calico-policy-controller.yml"
-    namespace: "{{kube_namespace}}"
+    namespace: "{{system_namespace}}"
     resource: "rs"
   when: inventory_hostname == groups['kube-master'][0]

View file

@@ -11,7 +11,7 @@
 - name: Kubernetes Apps | Start Resources
   kube:
     name: kubedns
-    namespace: "{{ kube_namespace }}"
+    namespace: "{{ system_namespace }}"
     kubectl: "{{bin_dir}}/kubectl"
     resource: "{{item.item.type}}"
     filename: "{{kube_config_dir}}/{{item.item.file}}"

View file

@@ -2,7 +2,7 @@ apiVersion: extensions/v1beta1
 kind: ReplicaSet
 metadata:
   name: calico-policy-controller
-  namespace: {{ kube_namespace }}
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: calico-policy
     kubernetes.io/cluster-service: "true"
@@ -15,7 +15,7 @@ spec:
   template:
     metadata:
       name: calico-policy-controller
-      namespace: kube-system
+      namespace: {{system_namespace}}
       labels:
         kubernetes.io/cluster-service: "true"
         k8s-app: calico-policy

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kubedns
-  namespace: {{ kube_namespace }}
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: kubedns
     version: v19

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kubedns
-  namespace: {{ kube_namespace }}
+  namespace: {{ system_namespace }}
   labels:
     k8s-app: kubedns
     kubernetes.io/cluster-service: "true"

View file

@@ -3,15 +3,15 @@
   kube:
     name: "canal-config"
     kubectl: "{{bin_dir}}/kubectl"
-    filename: "/etc/kubernetes/canal-config.yaml"
+    filename: "{{kube_config_dir}}/canal-config.yaml"
     resource: "configmap"
-    namespace: "kube-system"
+    namespace: "{{system_namespace}}"
 - name: Start flannel and calico-node
   run_once: true
   kube:
     name: "canal-node"
     kubectl: "{{bin_dir}}/kubectl"
-    filename: "/etc/kubernetes/canal-node.yaml"
+    filename: "{{kube_config_dir}}/canal-node.yaml"
     resource: "ds"
-    namespace: "kube-system"
+    namespace: "{{system_namespace}}"

View file

@@ -1,28 +1,7 @@
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
 # An experimental dev/test only dynamic volumes provisioner,
 # for PetSets. Works for kube>=v1.3 only.
 kube_hostpath_dynamic_provisioner: "false"
-# This is where you can drop yaml/json files and the kubelet will run those
-# pods on startup
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-# This directory is where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location.
-# Editting this value will almost surely break something. Don't
-# change it. Things like the systemd scripts are hard coded to
-# look in here. Don't do it.
-kube_config_dir: /etc/kubernetes
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
@@ -30,9 +9,6 @@ kube_apiserver_insecure_bind_address: 127.0.0.1
 # Inclusive at both ends of the range.
 kube_apiserver_node_port_range: "30000-32767"
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
 # ETCD cert dir for connecting apiserver to etcd
 etcd_config_dir: /etc/ssl/etcd
 etcd_cert_dir: "{{ etcd_config_dir }}/ssl"

View file

@@ -1,4 +1,4 @@
 apiVersion: v1
 kind: Namespace
 metadata:
-  name: kube-system
+  name: "{{system_namespace}}"

View file

@@ -36,28 +36,27 @@
   tags: kube-apiserver
 - meta: flush_handlers
-# Create kube-system namespace
-- name: copy 'kube-system' namespace manifest
-  copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
+- name: copy kube system namespace manifest
+  copy: src=namespace.yml dest={{kube_config_dir}}/{{system_namespace}}-ns.yml
   run_once: yes
   when: inventory_hostname == groups['kube-master'][0]
   tags: apps
-- name: Check if kube-system exists
-  command: "{{ bin_dir }}/kubectl get ns kube-system"
+- name: Check if kube system namespace exists
+  command: "{{ bin_dir }}/kubectl get ns {{system_namespace}}"
   register: 'kubesystem'
   changed_when: False
   failed_when: False
   run_once: yes
   tags: apps
-- name: Create 'kube-system' namespace
-  command: "{{ bin_dir }}/kubectl create -f /etc/kubernetes/kube-system-ns.yml"
+- name: Create kube system namespace
+  command: "{{ bin_dir }}/kubectl create -f {{kube_config_dir}}/{{system_namespace}}-ns.yml"
   changed_when: False
   when: kubesystem|failed and inventory_hostname == groups['kube-master'][0]
   tags: apps
-# Write other manifests
 - name: Write kube-controller-manager manifest
   template:
     src: manifests/kube-controller-manager.manifest.j2
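With the default values these tasks render to the same commands the play ran before; only the literals are now variable-driven. An illustrative rendering on the first kube-master host, assuming the stock defaults shown earlier in this diff:

# system_namespace=kube-system, kube_config_dir=/etc/kubernetes, bin_dir=/usr/local/bin
# copy: src=namespace.yml dest=/etc/kubernetes/kube-system-ns.yml
# /usr/local/bin/kubectl get ns kube-system
# /usr/local/bin/kubectl create -f /etc/kubernetes/kube-system-ns.yml   # only when the namespace is missing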

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-apiserver
-  namespace: kube-system
+  namespace: {{system_namespace}}
   labels:
     k8s-app: kube-apiserver
 spec:

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-controller-manager
-  namespace: kube-system
+  namespace: {{system_namespace}}
   labels:
     k8s-app: kube-controller
 spec:

View file

@@ -3,4 +3,4 @@ namespace_kubesystem:
   apiVersion: v1
   kind: Namespace
   metadata:
-    name: kube-system
+    name: "{{system_namespace}}"

View file

@@ -1,15 +1,6 @@
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
 # change to 0.0.0.0 to enable insecure access from anywhere (not recommended)
 kube_apiserver_insecure_bind_address: 127.0.0.1
-# This is where you can drop yaml/json files and the kubelet will run those
-# pods on startup
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
-dns_domain: "{{ cluster_name }}"
 # resolv.conf to base dns config
 kube_resolv_conf: "/etc/resolv.conf"
@@ -22,16 +13,5 @@ kube_proxy_masquerade_all: true
 # - extensions/v1beta1/daemonsets=true
 # - extensions/v1beta1/deployments=true
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-# This directory is where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location.
-# Editting this value will almost surely break something. Don't
-# change it. Things like the systemd scripts are hard coded to
-# look in here. Don't do it.
-kube_config_dir: /etc/kubernetes
 nginx_image_repo: nginx
 nginx_image_tag: 1.11.4-alpine

View file

@@ -1,4 +1,9 @@
 ---
+- set_fact:
+    standalone_kubelet: >-
+      {%- if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] -%}true{%- else -%}false{%- endif -%}
+  tags: facts
 - include: install.yml
   tags: kubelet
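standalone_kubelet ends up as the string "true" or "false", computed per host from group membership, and is consumed later with the |bool filter. A small illustration of the evaluation, using hypothetical group contents:

# Hypothetical inventory groups:
#   kube-master: [master1]
#   kube-node:   [node1, node2]
#
# Result of the expression above on each host:
#   master1  -> in kube-master and not in kube-node  -> standalone_kubelet: "true"
#   node1    -> not in kube-master                   -> standalone_kubelet: "false"
#   node2    -> not in kube-master                   -> standalone_kubelet: "false"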

View file

@@ -1,6 +1,6 @@
 ---
 - name: nginx-proxy | Write static pod
-  template: src=manifests/nginx-proxy.manifest.j2 dest=/etc/kubernetes/manifests/nginx-proxy.yml
+  template: src=manifests/nginx-proxy.manifest.j2 dest={{kube_manifest_dir}}/nginx-proxy.yml
 - name: nginx-proxy | Make nginx directory
   file: path=/etc/nginx state=directory mode=0700 owner=root

View file

@@ -27,7 +27,7 @@ DAEMON_USER=root
 [ -x "$DAEMON" ] || exit 0
 # Read configuration variable file if it is present
-[ -r /etc/kubernetes/$NAME.env ] && . /etc/kubernetes/$NAME.env
+[ -r {{kube_config_dir}}/$NAME.env ] && . {{kube_config_dir}}/$NAME.env
 # Define LSB log_* functions.
 # Depend on lsb-base (>= 3.2-14) to ensure that this file is present

View file

@@ -3,7 +3,7 @@
   --net=host --pid=host --name=kubelet --restart=on-failure:5 \
   -v /etc/cni:/etc/cni:ro \
   -v /opt/cni:/opt/cni:ro \
-  -v /etc/kubernetes:/etc/kubernetes \
+  -v {{kube_config_dir}}:{{kube_config_dir}} \
   -v /sys:/sys \
   -v /dev:/dev \
   -v {{ docker_daemon_graph }}:/var/lib/docker \

View file

@@ -12,17 +12,21 @@ KUBELET_ADDRESS="--address={{ ip | default("0.0.0.0") }}"
 # KUBELET_PORT="--port=10250"
 # You may leave this blank to use the actual hostname
 KUBELET_HOSTNAME="--hostname-override={{ ansible_hostname }}"
+{% if inventory_hostname in groups['kube-master'] and inventory_hostname not in groups['kube-node'] %}
+KUBELET_REGISTER_NODE="--register-node=false"
+{% endif %}
 # location of the api-server
+{% set kubelet_args_base %}--pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}{% endset %}
 {% if dns_setup|bool and skip_dnsmasq|bool %}
-KUBELET_ARGS="--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
+{% set kubelet_args_dns %}--cluster_dns={{ skydns_server }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %}
 {% elif dns_setup|bool %}
-KUBELET_ARGS="--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --resolv-conf={{ kube_resolv_conf }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
+{% set kubelet_args_dns %}--cluster_dns={{ dns_server }} --cluster_domain={{ dns_domain }} --resolv-conf={{ kube_resolv_conf }} {{ kubelet_args_base }}{% endset %}
 {% else %}
-KUBELET_ARGS="--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --require-kubeconfig --pod-manifest-path={{ kube_manifest_dir }} --pod-infra-container-image={{ pod_infra_image_repo }}:{{ pod_infra_image_tag }}"
+{% set kubelet_args_dns = kubelet_args_base %}
 {% endif %}
+{% if not standalone_kubelet|bool %}
+{% set kubelet_args %}--kubeconfig={{ kube_config_dir}}/node-kubeconfig.yaml --require-kubeconfig {{ kubelet_args_dns }}{% endset %}
+{% else %}
+{% set kubelet_args = kubelet_args_dns %}
+{% endif %}
+KUBELET_ARGS="{{ kubelet_args }}"
 {% if kube_network_plugin is defined and kube_network_plugin in ["calico", "weave", "canal"] %}
 KUBELET_NETWORK_PLUGIN="--network-plugin=cni --network-plugin-dir=/etc/cni/net.d"
 {% elif kube_network_plugin is defined and kube_network_plugin == "weave" %}
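On a standalone master the kubeconfig flags are dropped and the kubelet is told not to register itself, while a combined master/node keeps the previous behaviour. A sketch of the rendered kubelet.env fragment, with dnsmasq skipped and illustrative values for the DNS address, domain, and pause image (those values are assumptions, not taken from this diff):

# Host in kube-master only (standalone_kubelet == "true"):
KUBELET_REGISTER_NODE="--register-node=false"
KUBELET_ARGS="--cluster_dns=10.233.0.3 --cluster_domain=cluster.local --resolv-conf=/etc/resolv.conf --pod-manifest-path=/etc/kubernetes/manifests --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"

# Host also in kube-node (standalone_kubelet == "false"):
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/node-kubeconfig.yaml --require-kubeconfig --cluster_dns=10.233.0.3 --cluster_domain=cluster.local --resolv-conf=/etc/resolv.conf --pod-manifest-path=/etc/kubernetes/manifests --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"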

View file

@@ -10,7 +10,7 @@ Wants=docker.socket
 {% endif %}
 [Service]
-EnvironmentFile=/etc/kubernetes/kubelet.env
+EnvironmentFile={{kube_config_dir}}/kubelet.env
 ExecStart={{ bin_dir }}/kubelet \
     $KUBE_LOGTOSTDERR \
     $KUBE_LOG_LEVEL \

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: kube-proxy
-  namespace: kube-system
+  namespace: {{system_namespace}}
   labels:
     k8s-app: kube-proxy
 spec:
@@ -17,7 +17,7 @@ spec:
     - --v={{ kube_log_level }}
     - --master={{ kube_apiserver_endpoint }}
 {% if not is_kube_master %}
-    - --kubeconfig=/etc/kubernetes/node-kubeconfig.yaml
+    - --kubeconfig={{kube_config_dir}}/node-kubeconfig.yaml
 {% endif %}
     - --bind-address={{ ip | default(ansible_default_ipv4.address) }}
     - --cluster-cidr={{ kube_pods_subnet }}
@@ -31,10 +31,10 @@ spec:
     - mountPath: /etc/ssl/certs
       name: ssl-certs-host
      readOnly: true
-    - mountPath: /etc/kubernetes/node-kubeconfig.yaml
+    - mountPath: {{kube_config_dir}}/node-kubeconfig.yaml
      name: "kubeconfig"
      readOnly: true
-    - mountPath: /etc/kubernetes/ssl
+    - mountPath: {{kube_config_dir}}/ssl
      name: "etc-kube-ssl"
      readOnly: true
    - mountPath: /var/run/dbus
@@ -46,10 +46,10 @@ spec:
      path: /usr/share/ca-certificates
   - name: "kubeconfig"
     hostPath:
-      path: "/etc/kubernetes/node-kubeconfig.yaml"
+      path: "{{kube_config_dir}}/node-kubeconfig.yaml"
   - name: "etc-kube-ssl"
     hostPath:
-      path: "/etc/kubernetes/ssl"
+      path: "{{kube_config_dir}}/ssl"
   - name: "var-run-dbus"
     hostPath:
       path: "/var/run/dbus"

View file

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   name: nginx-proxy
-  namespace: kube-system
+  namespace: {{system_namespace}}
   labels:
     k8s-app: kube-nginx
 spec:

View file

@@ -27,7 +27,7 @@ pidfile="/var/run/$prog.pid"
 lockfile="/var/lock/subsys/$prog"
 logfile="/var/log/$prog"
-[ -e /etc/kubernetes/$prog.env ] && . /etc/kubernetes/$prog.env
+[ -e {{kube_config_dir}}/$prog.env ] && . {{kube_config_dir}}/$prog.env
 start() {
     if [ ! -x $exec ]; then

View file

@@ -1,26 +1,6 @@
 ---
 run_gitinfos: false
-# This directory is where all the additional scripts go
-# that Kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
-# This directory is where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location.
-# Editting this value will almost surely break something. Don't
-# change it. Things like the systemd scripts are hard coded to
-# look in here. Don't do it.
-kube_config_dir: /etc/kubernetes
-# Logging directory (sysvinit systems)
-kube_log_dir: "/var/log/kubernetes"
-# This is where you can drop yaml/json files and the kubelet will run those
-# pods on startup
-kube_manifest_dir: "{{ kube_config_dir }}/manifests"
 epel_rpm_download_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm"
 common_required_pkgs:

View file

@@ -1,21 +0,0 @@
-# This is where all the cert scripts and certs will be located
-kube_cert_dir: "{{ kube_config_dir }}/ssl"
-# This is where all of the bearer tokens will be stored
-kube_token_dir: "{{ kube_config_dir }}/tokens"
-# This is where to save basic auth file
-kube_users_dir: "{{ kube_config_dir }}/users"
-# This directory is where all the additional config stuff goes
-# the kubernetes normally puts in /srv/kubernets.
-# This puts them in a sane location.
-# Editting this value will almost surely break something. Don't
-# change it. Things like the systemd scripts are hard coded to
-# look in here. Don't do it.
-kube_config_dir: /etc/kubernetes
-# This directory is where all the additional scripts go
-# that Kubernetes normally puts in /srv/kubernetes.
-# This puts them in a sane location
-kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"

View file

@@ -35,12 +35,12 @@
 - name: Canal | Write canal configmap
   template:
     src: canal-config.yml.j2
-    dest: /etc/kubernetes/canal-config.yaml
+    dest: "{{kube_config_dir}}/canal-config.yaml"
 - name: Canal | Write canal node configuration
   template:
     src: canal-node.yml.j2
-    dest: /etc/kubernetes/canal-node.yaml
+    dest: "{{kube_config_dir}}/canal-node.yaml"
 - name: Canal | Copy cni plugins from hyperkube
   command: "/usr/bin/docker run --rm -v /opt/cni/bin:/cnibindir {{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} /usr/bin/rsync -a /opt/cni/bin/ /cnibindir/"

View file

@@ -10,7 +10,7 @@
 - name: Flannel | Create flannel pod manifest
   template:
     src: flannel-pod.yml
-    dest: /etc/kubernetes/manifests/flannel-pod.manifest
+    dest: "{{kube_manifest_dir}}/flannel-pod.manifest"
   notify: Flannel | delete default docker bridge
 - name: Flannel | Wait for flannel subnet.env file presence

View file

@@ -3,7 +3,7 @@
 apiVersion: "v1"
 metadata:
   name: "flannel"
-  namespace: "kube-system"
+  namespace: "{{system_namespace}}"
   labels:
     app: "flannel"
     version: "v0.1"

View file

@@ -34,7 +34,7 @@
 - name: reset | delete some files and directories
   file: path={{ item }} state=absent
   with_items:
-    - /etc/kubernetes/
+    - "{{kube_config_dir}}"
     - /var/lib/kubelet
     - /var/lib/etcd
     - /etc/ssl/etcd

View file

@@ -41,31 +41,31 @@
     cmd: journalctl -u kubelet --no-pager
   - name: kubedns_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kubedns -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system kubedns; done"
+      do kubectl logs ${i} --namespace {{system_namespace}} kubedns; done"
   - name: apiserver_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-apiserver -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system; done"
+      do kubectl logs ${i} --namespace {{system_namespace}}; done"
   - name: controller_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-controller -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system; done"
+      do kubectl logs ${i} --namespace {{system_namespace}}; done"
   - name: scheduler_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-scheduler -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system; done"
+      do kubectl logs ${i} --namespace {{system_namespace}}; done"
   - name: proxy_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-proxy -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system; done"
+      do kubectl logs ${i} --namespace {{system_namespace}}; done"
   - name: nginx_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=kube-nginx -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system; done"
+      do kubectl logs ${i} --namespace {{system_namespace}}; done"
   - name: flannel_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l app=flannel -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system flannel-container; done"
+      do kubectl logs ${i} --namespace {{system_namespace}} flannel-container; done"
   - name: canal_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=canal-node -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system flannel; done"
+      do kubectl logs ${i} --namespace {{system_namespace}} flannel; done"
   - name: calico_policy_logs
     cmd: "for i in `kubectl get pods --all-namespaces -l k8s-app=calico-policy -o jsonpath={.items..metadata.name}`;
-      do kubectl logs ${i} --namespace kube-system calico-policy-controller; done"
+      do kubectl logs ${i} --namespace {{system_namespace}} calico-policy-controller; done"
 logs:
   - /var/log/syslog