Initial commit

Smaine Kahlouch 2015-10-03 22:19:50 +02:00
parent 4aa588e481
commit 00c562828f
109 changed files with 3174 additions and 2 deletions


@ -1,2 +1,31 @@
# kubernetes-ansible
This playbook deploys a whole Kubernetes cluster, configures the network overlay and some addons.
Set up a Kubernetes cluster.
# Download necessary binaries
Note: the variable 'local_release_dir' defines where the binaries will be downloaded.
Ensure you have enough disk space.
# Kubernetes
Kubernetes services are configured with the NodePort type,
i.e. each node opens the same TCP port and forwards the traffic to the target pod wherever it is located.
master:
- apiserver:
  Currently the apiserver listens on both the secure and the insecure port.
  TODO: secure everything, especially Calico.
- scheduler
- controller
- proxy
node:
- kubelet:
  The kubelet is configured to call Calico whenever a pod is created or destroyed.
- proxy:
  Configures all the forwarding rules.
# Overlay network
You can choose between two network overlays; only one must be chosen:
- flannel: GRE/VXLAN (layer 2) networking
- calico: BGP (layer 3) networking
# Loadbalancer
The machine where Ansible is run must be allowed to reach the master IP on port 8080 (the Kubernetes API):
the playbook gathers the service definitions in order to know which NodePort is configured.
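The whole deployment can be driven from the Ansible machine in one run. The commands below are a sketch, assuming the sample inventory shown further down is saved as inventory/inventory.cfg (that path is an assumption; adjust it to your layout):

ansible-playbook -i inventory/inventory.cfg cluster.yml
# then, from the same machine, list the services and their NodePorts through the
# insecure API port mentioned above (<master-ip> is a placeholder)
curl -s http://<master-ip>:8080/api/v1/services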

cluster.yml (new file, 21 lines)

@ -0,0 +1,21 @@
---
- hosts: downloader
sudo: no
roles:
- { role: download, tags: download }
- hosts: k8s-cluster
roles:
- { role: etcd, tags: etcd }
- { role: docker, tags: docker }
- { role: overlay_network, tags: ['calico', 'flannel', 'network'] }
- { role: dnsmasq, tags: dnsmasq }
- hosts: kube-master
roles:
- { role: kubernetes/master, tags: master }
- { role: addons, tags: addons }
- hosts: kube-node
roles:
- { role: kubernetes/node, tags: node }


@ -0,0 +1,6 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"


@ -0,0 +1,68 @@
# Users to create for basic auth in Kubernetes API via HTTP
kube_users:
kube:
pass: changeme
role: admin
root:
pass: changeme
role: admin
# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
overlay_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
overlay_network_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire overlay network, so the whole of that range must be
# unused in your environment.
# overlay_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
overlay_network_host_prefix: 24
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq
upstream_dns_servers:
- 8.8.8.8
- 4.4.8.8
# Set this variable to 'false' to disable the whole DNS configuration.
dns_setup: true
dns_domain: "{{ cluster_name }}"
# Ip address of the kubernetes dns service
kube_dns_server: 10.233.0.10
# Number of replicas of DNS instances started on kubernetes
dns_replicas: 2
# Set to 'false' to disable default Kubernetes UI setup
enable_ui: true
# Set to 'false' to disable default Elasticsearch + Kibana logging setup
enable_logging: false
# Set to "false' to disable default Monitoring (cAdvisor + heapster + influxdb + grafana)
enable_monitoring: false
# Set to 'false' to disable the docker garbage collection.
# Every hour it removes images that are not used by containers and exited containers.
enable_docker_gc: true
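A quick sanity check of the addressing plan implied by these values (a sketch using shell arithmetic, with the /18 pod network carved into /24 per-node ranges as configured above):

echo $(( 2 ** (24 - 18) ))      # 64 node-sized /24 ranges fit in the /18 overlay_network_subnet
echo $(( 2 ** (32 - 24) - 2 ))  # 254 usable pod addresses per /24 node range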


@ -0,0 +1,19 @@
[downloader]
192.168.0.1
[kube-master]
# NB : the br_addr must be in the {{ calico_pool }} subnet
# it will assign a /24 subnet per node
192.168.0.1 br_addr=10.233.64.1
[kube-node]
192.168.0.2 br_addr=10.233.65.1
192.168.0.3 br_addr=10.233.66.1
192.168.0.4 br_addr=10.233.67.1
[etcd]
192.168.0.1
[k8s-cluster:children]
kube-node
kube-master


@ -0,0 +1,6 @@
# Directory where the binaries will be installed
# bin_dir: /usr/local/bin
# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
# local_release_dir: "/tmp/releases"


@ -0,0 +1,68 @@
# Users to create for basic auth in Kubernetes API via HTTP
# kube_users:
# kube:
# pass: changeme
# role: admin
# root:
# pass: changeme
# role: admin
# Kubernetes cluster name, also will be used as DNS domain
# cluster_name: cluster.local
#
# set this variable to calico if needed. keep it empty if flannel is used
# overlay_network_plugin: calico
# Kubernetes internal network for services, unused block of space.
# kube_service_addresses: 10.233.0.0/18
# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
# overlay_network_subnet: 10.233.64.0/18
# internal network total size (optional). This is the prefix of the
# entire overlay network, so the whole of that range must be
# unused in your environment.
# overlay_network_prefix: 18
# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 64 nodes with 254 pods per node.
# overlay_network_host_prefix: 24
# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).
# Upstream dns servers used by dnsmasq
# upstream_dns_servers:
# - 8.8.8.8
# - 4.4.8.8
# Set this variable to 'false' to disable the whole DNS configuration.
# dns_setup: true
# dns_domain: "{{ cluster_name }}"
# Ip address of the kubernetes dns service
# kube_dns_server: 10.233.0.10
# Number of replicas of DNS instances started on kubernetes
# dns_replicas: 2
# Set to 'false' to disable default Kubernetes UI setup
# enable_ui: true
# Set to 'false' to disable default Elasticsearch + Kibana logging setup
# enable_logging: false
# Set to "false' to disable default Monitoring (cAdvisor + heapster + influxdb + grafana)
# enable_monitoring: false
# Set to 'false' to disable the docker garbage collection.
# Every hour it removes images that are not used by containers and exited containers.
# enable_docker_gc: true

library/kube.py (new file, 309 lines)

@ -0,0 +1,309 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
DOCUMENTATION = """
---
module: kube
short_description: Manage Kubernetes Cluster
description:
- Create, replace, remove, and stop resources within a Kubernetes Cluster
version_added: "2.0"
options:
name:
required: false
default: null
description:
- The name associated with resource
filename:
required: false
default: null
description:
- The path and filename of the resource(s) definition file.
namespace:
required: false
default: null
description:
- The namespace associated with the resource(s)
resource:
required: false
default: null
description:
- The resource to perform an action on. pods (po), replicationControllers (rc), services (svc)
label:
required: false
default: null
description:
- The labels used to filter specific resources.
server:
required: false
default: null
description:
- The url for the API server that commands are executed against.
api_version:
required: false
choices: ['v1', 'v1beta3']
default: v1
description:
- The API version associated with cluster.
force:
required: false
default: false
description:
- A flag to indicate to force delete, replace, or stop.
all:
required: false
default: false
description:
- A flag to indicate delete all, stop all, or all namespaces when checking exists.
log_level:
required: false
default: 0
description:
- Indicates the level of verbosity of logging by kubectl.
state:
required: false
choices: ['present', 'absent', 'latest', 'reloaded', 'stopped']
default: present
description:
- present handles checking existence or creating if definition file provided,
absent handles deleting resource(s) based on other options,
latest handles creating or updating based on existence,
reloaded handles updating resource(s) definition using definition file,
stopped handles stopping resource(s) based on other options.
requirements:
- kubectl
author: "Kenny Jones (@kenjones-cisco)"
"""
EXAMPLES = """
- name: test nginx is present
kube: name=nginx resource=rc state=present
- name: test nginx is stopped
kube: name=nginx resource=rc state=stopped
- name: test nginx is absent
kube: name=nginx resource=rc state=absent
- name: test nginx is present
kube: filename=/tmp/nginx.yml
"""
class KubeManager(object):
def __init__(self, module):
self.module = module
self.base_cmd = [module.get_bin_path('kubectl', True)]
self.api_version = module.params.get('api_version')
if self.api_version:
self.base_cmd.append('--api-version=' + self.api_version)
if module.params.get('server'):
self.base_cmd.append('--server=' + module.params.get('server'))
if module.params.get('log_level'):
self.base_cmd.append('--v=' + str(module.params.get('log_level')))
if module.params.get('namespace'):
self.base_cmd.append('--namespace=' + module.params.get('namespace'))
self.all = module.params.get('all')
self.force = module.params.get('force')
self.name = module.params.get('name')
self.filename = module.params.get('filename')
self.resource = module.params.get('resource')
self.label = module.params.get('label')
def _execute(self, cmd):
args = self.base_cmd + cmd
try:
rc, out, err = self.module.run_command(args)
if rc != 0:
self.module.fail_json(
msg='error running kubectl (%s) command (rc=%d): %s' % (' '.join(args), rc, out or err))
except Exception as exc:
self.module.fail_json(
msg='error running kubectl (%s) command: %s' % (' '.join(args), str(exc)))
return out.splitlines()
def _execute_nofail(self, cmd):
args = self.base_cmd + cmd
rc, out, err = self.module.run_command(args)
if rc != 0:
return None
return out.splitlines()
def create(self, check=True):
if check and self.exists():
return []
cmd = ['create']
if not self.filename:
self.module.fail_json(msg='filename required to create')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def replace(self):
if not self.force and not self.exists():
return []
cmd = ['replace']
if self.api_version != 'v1':
cmd = ['update']
if self.force:
cmd.append('--force')
if not self.filename:
self.module.fail_json(msg='filename required to reload')
cmd.append('--filename=' + self.filename)
return self._execute(cmd)
def delete(self):
if not self.force and not self.exists():
return []
cmd = ['delete']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to delete without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def exists(self):
cmd = ['get']
if not self.resource:
return False
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
cmd.append('--no-headers')
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all-namespaces')
result = self._execute_nofail(cmd)
if not result:
return False
return True
def stop(self):
if not self.force and not self.exists():
return []
cmd = ['stop']
if self.filename:
cmd.append('--filename=' + self.filename)
else:
if not self.resource:
self.module.fail_json(msg='resource required to stop without filename')
cmd.append(self.resource)
if self.name:
cmd.append(self.name)
if self.label:
cmd.append('--selector=' + self.label)
if self.all:
cmd.append('--all')
if self.force:
cmd.append('--ignore-not-found')
return self._execute(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(),
filename=dict(),
namespace=dict(),
resource=dict(),
label=dict(),
server=dict(),
api_version=dict(default='v1', choices=['v1', 'v1beta3']),
force=dict(default=False, type='bool'),
all=dict(default=False, type='bool'),
log_level=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent', 'latest', 'reloaded', 'stopped']),
)
)
changed = False
manager = KubeManager(module)
state = module.params.get('state')
if state == 'present':
result = manager.create()
elif state == 'absent':
result = manager.delete()
elif state == 'reloaded':
result = manager.replace()
elif state == 'stopped':
result = manager.stop()
elif state == 'latest':
if manager.exists():
manager.force = True
result = manager.replace()
else:
result = manager.create(check=False)
else:
module.fail_json(msg='Unrecognized state %s.' % state)
if result:
changed = True
module.exit_json(changed=changed,
msg='success: %s' % (' '.join(result))
)
from ansible.module_utils.basic import * # noqa
if __name__ == '__main__':
main()
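Besides the playbook examples in the DOCUMENTATION block above, the module can also be exercised ad hoc from the repository root. This is a sketch: the inventory path and the manifest path are assumptions (the namespace manifest is written by the addons role into the configured manifests directory):

ansible kube-master -i inventory/inventory.cfg -M library -m kube \
  -a "resource=ns name=kube-system filename=/etc/kubernetes/manifests/kube-system.yaml state=present"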


@ -0,0 +1,2 @@
---
# defaults file for addons


@ -0,0 +1,40 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: elasticsearch-logging-v1
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: elasticsearch-logging
version: v1
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:1.7
name: elasticsearch-logging
resources:
limits:
cpu: 100m
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- name: es-persistent-storage
mountPath: /data
volumes:
- name: es-persistent-storage
emptyDir: {}


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Elasticsearch"
spec:
ports:
- port: 9200
protocol: TCP
targetPort: db
selector:
k8s-app: elasticsearch-logging


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: monitoring-grafana
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Grafana"
spec:
type: NodePort
ports:
- port: 80
targetPort: 8080
selector:
k8s-app: influxGrafana


@ -0,0 +1,32 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: monitoring-heapster-v8
namespace: kube-system
labels:
k8s-app: heapster
version: v8
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: heapster
version: v8
template:
metadata:
labels:
k8s-app: heapster
version: v8
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster:v0.17.0
name: heapster
resources:
limits:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086


@ -0,0 +1,15 @@
kind: Service
apiVersion: v1
metadata:
name: monitoring-heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Heapster"
spec:
type: NodePort
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster


@ -0,0 +1,53 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: monitoring-influx-grafana-v1
namespace: kube-system
labels:
k8s-app: influxGrafana
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: influxGrafana
version: v1
template:
metadata:
labels:
k8s-app: influxGrafana
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster_influxdb:v0.3
name: influxdb
resources:
limits:
cpu: 100m
memory: 200Mi
ports:
- containerPort: 8083
hostPort: 8083
- containerPort: 8086
hostPort: 8086
volumeMounts:
- name: influxdb-persistent-storage
mountPath: /data
- image: gcr.io/google_containers/heapster_grafana:v0.7
name: grafana
resources:
limits:
cpu: 100m
memory: 100Mi
env:
- name: INFLUXDB_EXTERNAL_URL
value: /api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb:api/db/
- name: INFLUXDB_HOST
value: monitoring-influxdb
- name: INFLUXDB_PORT
value: "8086"
volumes:
- name: influxdb-persistent-storage
emptyDir: {}


@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: monitoring-influxdb
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "InfluxDB"
spec:
ports:
- name: http
port: 8083
targetPort: 8083
- name: api
port: 8086
targetPort: 8086
selector:
k8s-app: influxGrafana


@ -0,0 +1,34 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kibana-logging-v1
namespace: kube-system
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kibana-logging
version: v1
template:
metadata:
labels:
k8s-app: kibana-logging
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kibana-logging
image: gcr.io/google_containers/kibana:1.3
resources:
limits:
cpu: 100m
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:9200"
ports:
- containerPort: 5601
name: ui
protocol: TCP


@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: kibana-logging
namespace: kube-system
labels:
k8s-app: kibana-logging
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Kibana"
spec:
type: NodePort
ports:
- port: 5601
protocol: TCP
targetPort: ui
selector:
k8s-app: kibana-logging


@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: kube-system


@ -0,0 +1,36 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-ui-v1
namespace: kube-system
labels:
k8s-app: kube-ui
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: kube-ui
version: v1
template:
metadata:
labels:
k8s-app: kube-ui
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: kube-ui
image: gcr.io/google_containers/kube-ui:v1.1
resources:
limits:
cpu: 100m
memory: 50Mi
ports:
- containerPort: 8080
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 30
timeoutSeconds: 5


@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: kube-ui
namespace: kube-system
labels:
k8s-app: kube-ui
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeUI"
spec:
type: NodePort
selector:
k8s-app: kube-ui
ports:
- port: 80
targetPort: 8080


@ -0,0 +1,2 @@
---
# handlers file for addons


@ -0,0 +1,4 @@
---
dependencies:
- { role: kubernetes/master }
- { role: kubernetes/common }


@ -0,0 +1,44 @@
---
- name: Kube-UI | Write pod file
copy:
src: kube-ui-rc.yaml
dest: "{{ kube_manifest_dir }}/kube-ui-rc.yaml"
register: kube_ui_rc_def
when: enable_ui
tags:
- addons
- kube-ui
- name: Kube-UI | Write service file
copy:
src: kube-ui-svc.yaml
dest: "{{ kube_manifest_dir }}/kube-ui-svc.yaml"
register: kube_ui_svc_def
when: enable_ui
tags:
- addons
- kube-ui
- name: Kube-UI | Create or update replication controller
kube:
namespace: kube-system
resource: rc
name: kube-ui-v1
filename: "{{ kube_manifest_dir }}/kube-ui-rc.yaml"
state: "{{ kube_ui_rc_def.changed | ternary('latest','present') }}"
when: enable_ui
tags:
- addons
- kube-ui
- name: Kube-UI | Create or update service
kube:
namespace: kube-system
resource: svc
name: kube-ui
filename: "{{ kube_manifest_dir }}/kube-ui-svc.yaml"
state: "{{ kube_ui_svc_def.changed | ternary('latest','present') }}"
when: enable_ui
tags:
- addons
- kube-ui


@ -0,0 +1,88 @@
---
- name: Logging | Kibana | Write pod file
copy:
src: kibana-rc.yaml
dest: "{{ kube_manifest_dir }}/kibana-rc.yaml"
register: kibana_rc_def
when: enable_logging
tags:
- addons
- logging
- name: Logging | Kibana | Write service file
copy:
src: kibana-svc.yaml
dest: "{{ kube_manifest_dir }}/kibana-svc.yaml"
register: kibana_svc_def
when: enable_logging
tags:
- addons
- logging
- name: Logging | ES | Write pod file
copy:
src: es-rc.yaml
dest: "{{ kube_manifest_dir }}/es-rc.yaml"
register: es_rc_def
when: enable_logging
tags:
- addons
- logging
- name: Logging | ES | Write service file
copy:
src: es-svc.yaml
dest: "{{ kube_manifest_dir }}/es-svc.yaml"
register: es_svc_def
when: enable_logging
tags:
- addons
- logging
- name: Logging | ES | Create/update replication controller
kube:
namespace: kube-system
resource: rc
name: elasticsearch-logging-v1
filename: "{{ kube_manifest_dir }}/es-rc.yaml"
state: "{{ es_rc_def.changed | ternary('latest','present') }}"
when: enable_logging
tags:
- addons
- logging
- name: Logging | ES | Create/update service
kube:
namespace: kube-system
resource: svc
name: elasticsearch-logging
filename: "{{ kube_manifest_dir }}/es-svc.yaml"
state: "{{ es_svc_def.changed | ternary('latest','present') }}"
when: enable_logging
tags:
- addons
- logging
- name: Logging | Kibana | Create/update replication controller
kube:
namespace: kube-system
resource: rc
name: kibana-logging-v1
filename: "{{ kube_manifest_dir }}/kibana-rc.yaml"
state: "{{ kibana_rc_def.changed | ternary('latest','present') }}"
when: enable_logging
tags:
- addons
- logging
- name: Logging | Kibana | Create/update service
kube:
namespace: kube-system
resource: svc
name: kibana-logging
filename: "{{ kube_manifest_dir }}/kibana-svc.yaml"
state: "{{ kibana_svc_def.changed | ternary('latest','present') }}"
when: enable_logging
tags:
- addons
- logging


@ -0,0 +1,45 @@
---
- name: create manifests directory
file: path={{ kube_manifest_dir }} state=directory
- name: Write kube-system namespace manifest
copy:
src=kube-system.yaml
dest={{ kube_manifest_dir }}/kube-system.yaml
- name: Create kube-system namespace
kube:
resource: ns
name: kube-system
filename: "{{ kube_manifest_dir }}/kube-system.yaml"
state: present
tags:
- addons
ignore_errors: yes
- name: Run kube-gen-token script to create {{ kube_token_dir }}/known_tokens.csv
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_items:
- "system:dns"
- "system:monitoring"
- "system:logging"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
notify:
- restart apiserver
tags:
- addons
- include: skydns.yml
when: dns_setup
- include: kube-ui.yml
when: enable_ui
- include: logging.yml
when: enable_logging
- include: monitoring.yml
when: enable_monitoring


@ -0,0 +1,111 @@
---
- name: Monitoring | Influxdb | Write controller file
copy:
src: influxdb-grafana-controller.yaml
dest: "{{ kube_manifest_dir }}/influxdb-grafana-controller.yaml"
register: influxdb_rc_def
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Influxdb | Write service file
copy:
src: influxdb-service.yaml
dest: "{{ kube_manifest_dir }}/influxdb-service.yaml"
register: influxdb_svc_def
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Grafana | Write service file
copy:
src: grafana-service.yaml
dest: "{{ kube_manifest_dir }}/grafana-service.yaml"
register: grafana_svc_def
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Heapster | Write controller file
copy:
src: heapster-controller.yaml
dest: "{{ kube_manifest_dir }}/heapster-controller.yaml"
register: heapster_rc_def
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Heapster | Write service file
copy:
src: heapster-service.yaml
dest: "{{ kube_manifest_dir }}/heapster-service.yaml"
register: heapster_svc_def
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Influxdb | Create/update replication controller
kube:
namespace: kube-system
resource: rc
name: monitoring-influx-grafana-v1
filename: "{{ kube_manifest_dir }}/influxdb-grafana-controller.yaml"
state: "{{ influxdb_rc_def.changed | ternary('latest','present') }}"
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Influxdb | Create/update service
kube:
namespace: kube-system
resource: svc
name: monitoring-influxdb
filename: "{{ kube_manifest_dir }}/influxdb-service.yaml"
state: "{{ influxdb_svc_def.changed | ternary('latest','present') }}"
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Grafana | Create/update service
kube:
namespace: kube-system
resource: svc
name: monitoring-grafana
filename: "{{ kube_manifest_dir }}/grafana-service.yaml"
state: "{{ grafana_svc_def.changed | ternary('latest','present') }}"
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Heapster | Create/update replication controller
kube:
namespace: kube-system
resource: rc
name: monitoring-heapster-v8
filename: "{{ kube_manifest_dir }}/heapster-controller.yaml"
state: "{{ heapster_rc_def.changed | ternary('latest','present') }}"
when: enable_monitoring
tags:
- addons
- monitoring
- name: Monitoring | Heapster | Create/update service
kube:
namespace: kube-system
resource: svc
name: monitoring-heapster
filename: "{{ kube_manifest_dir }}/heapster-service.yaml"
state: "{{ heapster_svc_def.changed | ternary('latest','present') }}"
when: enable_monitoring
tags:
- addons
- monitoring


@ -0,0 +1,44 @@
---
- name: SkyDNS | Write pod file
template:
src: skydns-rc.yaml.j2
dest: "{{ kube_manifest_dir }}/skydns-rc.yaml"
register: dns_rc_def
when: dns_setup
tags:
- addons
- skydns
- name: SkyDNS | Write service file
template:
src: skydns-svc.yaml.j2
dest: "{{ kube_manifest_dir }}/skydns-svc.yaml"
register: dns_svc_def
when: dns_setup
tags:
- addons
- skydns
- name: SkyDNS | Create or update replication controller
kube:
namespace: kube-system
resource: rc
name: kube-dns-v8
filename: "{{ kube_manifest_dir }}/skydns-rc.yaml"
state: "{{ dns_rc_def.changed | ternary('latest','present') }}"
when: dns_setup
tags:
- addons
- skydns
- name: SkyDNS | Create or update service
kube:
namespace: kube-system
resource: svc
name: kube-dns
filename: "{{ kube_manifest_dir }}/skydns-svc.yaml"
state: "{{ dns_svc_def.changed | ternary('latest','present') }}"
when: dns_setup
tags:
- addons
- skydns


@ -0,0 +1,91 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v8
namespace: kube-system
labels:
k8s-app: kube-dns
version: v8
kubernetes.io/cluster-service: "true"
spec:
replicas: {{ dns_replicas }}
selector:
k8s-app: kube-dns
version: v8
template:
metadata:
labels:
k8s-app: kube-dns
version: v8
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: etcd
image: gcr.io/google_containers/etcd:2.0.9
resources:
limits:
cpu: 100m
memory: 50Mi
command:
- /usr/local/bin/etcd
- -data-dir
- /var/etcd/data
- -listen-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -advertise-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -initial-cluster-token
- skydns-etcd
volumeMounts:
- name: etcd-storage
mountPath: /var/etcd/data
- name: kube2sky
image: gcr.io/google_containers/kube2sky:1.11
resources:
limits:
cpu: 100m
memory: 50Mi
args:
# command = "/kube2sky"
- -domain={{ dns_domain }}
- name: skydns
image: gcr.io/google_containers/skydns:2015-03-11-001
resources:
limits:
cpu: 100m
memory: 50Mi
args:
# command = "/skydns"
- -machines=http://localhost:4001
- -addr=0.0.0.0:53
- -domain={{ dns_domain }}.
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
- name: healthz
image: gcr.io/google_containers/exechealthz:1.0
resources:
limits:
cpu: 10m
memory: 20Mi
args:
- -cmd=nslookup kubernetes.default.svc.{{ dns_domain }} localhost >/dev/null
- -port=8080
ports:
- containerPort: 8080
protocol: TCP
volumes:
- name: etcd-storage
emptyDir: {}
dnsPolicy: Default # Don't use cluster DNS.


@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{ kube_dns_server }}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP


@ -0,0 +1,4 @@
#!/bin/sh
make_resolv_conf() {
:
}


@ -0,0 +1,3 @@
---
- name: restart dnsmasq
command: systemctl restart dnsmasq


@ -0,0 +1,58 @@
---
- name: populate inventory into hosts file
lineinfile:
dest: /etc/hosts
regexp: "^{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}$"
line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}"
state: present
when: hostvars[item].ansible_default_ipv4.address is defined
with_items: groups['all']
- name: clean hosts file
lineinfile:
dest: /etc/hosts
regexp: "{{ item }}"
state: absent
with_items:
- '^127\.0\.0\.1(\s+){{ inventory_hostname }}.*'
- '^::1(\s+){{ inventory_hostname }}.*'
- name: install dnsmasq and bind9utils
apt:
name: "{{ item }}"
state: present
with_items:
- dnsmasq
- bind9utils
when: inventory_hostname in groups['kube-master'][0]
- name: ensure dnsmasq.d directory exists
file:
path: /etc/dnsmasq.d
state: directory
when: inventory_hostname in groups['kube-master'][0]
- name: configure dnsmasq
template:
src: 01-kube-dns.conf.j2
dest: /etc/dnsmasq.d/01-kube-dns.conf
mode: 0755
notify:
- restart dnsmasq
when: inventory_hostname in groups['kube-master'][0]
- name: enable dnsmasq
service:
name: dnsmasq
state: started
enabled: yes
when: inventory_hostname in groups['kube-master'][0]
- name: update resolv.conf with new DNS setup
template:
src: resolv.conf.j2
dest: /etc/resolv.conf
mode: 0644
- name: disable resolv.conf modification by dhclient
copy: src=dhclient_nodnsupdate dest=/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate mode=u+x


@ -0,0 +1,19 @@
#Listen on all interfaces
interface=*
addn-hosts=/etc/hosts
bogus-priv
#Set upstream dns servers
{% if upstream_dns_servers is defined %}
{% for srv in upstream_dns_servers %}
server={{ srv }}
{% endfor %}
{% else %}
server=8.8.8.8
server=8.8.4.4
{% endif %}
# Forward k8s domain to kube-dns
server=/{{ dns_domain }}/{{ kube_dns_server }}


@ -0,0 +1,5 @@
; generated by ansible
search {{ [ 'default.svc.' + dns_domain, 'svc.' + dns_domain, dns_domain ] | join(' ') }}
{% for host in groups['kube-master'] %}
nameserver {{ hostvars[host]['ansible_default_ipv4']['address'] }}
{% endfor %}
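With dnsmasq forwarding the cluster domain (previous template) and resolv.conf pointing at the masters, service name resolution can be checked from any node. This is a sketch, assuming the SkyDNS addon is deployed and cluster_name is left at its default cluster.local:

getent hosts kubernetes.default.svc.cluster.local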


@ -0,0 +1,211 @@
#!/bin/bash
# Copyright (c) 2014 Spotify AB.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This script attempts to garbage collect docker containers and images.
# Containers that exited more than an hour ago are removed.
# Images that have existed more than an hour and are not in use by any
# containers are removed.
# Note: Although docker normally prevents removal of images that are in use by
# containers, we take extra care to not remove any image tags (e.g.
# ubuntu:14.04, busybox, etc) that are used by containers. A naive
# "docker rmi `docker images -q`" will leave images stripped of all tags,
# forcing users to re-pull the repositories even though the images
# themselves are still on disk.
# Note: State is stored in $STATE_DIR, defaulting to /var/lib/docker-gc
set -o nounset
set -o errexit
GRACE_PERIOD_SECONDS=${GRACE_PERIOD_SECONDS:=3600}
STATE_DIR=${STATE_DIR:=/var/lib/docker-gc}
DOCKER=${DOCKER:=docker}
PID_DIR=${PID_DIR:=/var/run}
for pid in $(pidof -s docker-gc); do
if [[ $pid != $$ ]]; then
echo "[$(date)] : docker-gc : Process is already running with PID $pid"
exit 1
fi
done
trap "rm -f -- '$PID_DIR/dockergc'" EXIT
echo $$ > $PID_DIR/dockergc
EXCLUDE_FROM_GC=${EXCLUDE_FROM_GC:=/etc/docker-gc-exclude}
if [ ! -f "$EXCLUDE_FROM_GC" ]
then
EXCLUDE_FROM_GC=/dev/null
fi
EXCLUDE_CONTAINERS_FROM_GC=${EXCLUDE_CONTAINERS_FROM_GC:=/etc/docker-gc-exclude-containers}
if [ ! -f "$EXCLUDE_CONTAINERS_FROM_GC" ]
then
EXCLUDE_CONTAINERS_FROM_GC=/dev/null
fi
EXCLUDE_IDS_FILE="exclude_ids"
EXCLUDE_CONTAINER_IDS_FILE="exclude_container_ids"
function date_parse {
if date --utc >/dev/null 2>&1; then
# GNU/date
echo $(date -u --date "${1}" "+%s")
else
# BSD/date
echo $(date -j -u -f "%F %T" "${1}" "+%s")
fi
}
# Elapsed time since a docker timestamp, in seconds
function elapsed_time() {
# Docker 1.5.0 datetime format is 2015-07-03T02:39:00.390284991
# Docker 1.7.0 datetime format is 2015-07-03 02:39:00.390284991 +0000 UTC
utcnow=$(date -u "+%s")
replace_q="${1#\"}"
without_ms="${replace_q:0:19}"
replace_t="${without_ms/T/ }"
epoch=$(date_parse "${replace_t}")
echo $(($utcnow - $epoch))
}
function compute_exclude_ids() {
# Find images that match patterns in the EXCLUDE_FROM_GC file and put their
# id prefixes into $EXCLUDE_IDS_FILE, prefixed with ^
PROCESSED_EXCLUDES="processed_excludes.tmp"
# Take each line and put a space at the beginning and end, so when we
# grep for them below, it will effectively be: "match either repo:tag
# or imageid". Also delete blank lines or lines that only contain
# whitespace
sed 's/^\(.*\)$/ \1 /' $EXCLUDE_FROM_GC | sed '/^ *$/d' > $PROCESSED_EXCLUDES
# The following looks a bit of a mess, but here's what it does:
# 1. Get images
# 2. Skip header line
# 3. Turn columnar display of 'REPO TAG IMAGEID ....' to 'REPO:TAG IMAGEID'
# 4. find lines that contain things mentioned in PROCESSED_EXCLUDES
# 5. Grab the image id from the line
# 6. Prepend ^ to the beginning of each line
# What this does is make grep patterns to match image ids mentioned by
# either repo:tag or image id for later greppage
$DOCKER images \
| tail -n+2 \
| sed 's/^\([^ ]*\) *\([^ ]*\) *\([^ ]*\).*/ \1:\2 \3 /' \
| grep -f $PROCESSED_EXCLUDES 2>/dev/null \
| cut -d' ' -f3 \
| sed 's/^/^/' > $EXCLUDE_IDS_FILE
}
function compute_exclude_container_ids() {
# Find containers matching to patterns listed in EXCLUDE_CONTAINERS_FROM_GC file
# Implode their values with a \| separator on a single line
PROCESSED_EXCLUDES=`cat $EXCLUDE_CONTAINERS_FROM_GC \
| xargs \
| sed -e 's/ /\|/g'`
# The empty string would match everything
if [ "$PROCESSED_EXCLUDES" = "" ]; then
touch $EXCLUDE_CONTAINER_IDS_FILE
return
fi
# Find all docker images
# Filter out with matching names
# and put them to $EXCLUDE_CONTAINER_IDS_FILE
$DOCKER ps -a \
| grep -E "$PROCESSED_EXCLUDES" \
| awk '{ print $1 }' \
| tr -s " " "\012" \
| sort -u > $EXCLUDE_CONTAINER_IDS_FILE
}
# Change into the state directory (and create it if it doesn't exist)
if [ ! -d "$STATE_DIR" ]
then
mkdir -p $STATE_DIR
fi
cd "$STATE_DIR"
# Verify that docker is reachable
$DOCKER version 1>/dev/null
# List all currently existing containers
$DOCKER ps -a -q --no-trunc | sort | uniq > containers.all
# List running containers
$DOCKER ps -q --no-trunc | sort | uniq > containers.running
# compute ids of container images to exclude from GC
compute_exclude_ids
# compute ids of containers to exclude from GC
compute_exclude_container_ids
# List containers that are not running
comm -23 containers.all containers.running > containers.exited
# Find exited containers that finished at least GRACE_PERIOD_SECONDS ago
echo -n "" > containers.reap.tmp
cat containers.exited | while read line
do
EXITED=$(${DOCKER} inspect -f "{{json .State.FinishedAt}}" ${line})
ELAPSED=$(elapsed_time $EXITED)
if [[ $ELAPSED -gt $GRACE_PERIOD_SECONDS ]]; then
echo $line >> containers.reap.tmp
fi
done
# List containers that we will remove and exclude ids.
cat containers.reap.tmp | sort | uniq | grep -v -f $EXCLUDE_CONTAINER_IDS_FILE > containers.reap || true
# List containers that we will keep.
comm -23 containers.all containers.reap > containers.keep
# List images used by containers that we keep.
# This may be both image id's and repo/name:tag, so normalize to image id's only
cat containers.keep |
xargs -n 1 $DOCKER inspect -f '{{.Config.Image}}' 2>/dev/null |
sort | uniq |
xargs -n 1 $DOCKER inspect -f '{{.Id}}' 2>/dev/null |
sort | uniq > images.used
# List images to reap; images that existed last run and are not in use.
$DOCKER images -q --no-trunc | sort | uniq > images.all
# Find images that are created at least GRACE_PERIOD_SECONDS ago
echo -n "" > images.reap.tmp
cat images.all | while read line
do
CREATED=$(${DOCKER} inspect -f "{{.Created}}" ${line})
ELAPSED=$(elapsed_time $CREATED)
if [[ $ELAPSED -gt $GRACE_PERIOD_SECONDS ]]; then
echo $line >> images.reap.tmp
fi
done
comm -23 images.reap.tmp images.used | grep -v -f $EXCLUDE_IDS_FILE > images.reap || true
# Reap containers.
xargs -n 1 $DOCKER rm --volumes=true < containers.reap &>/dev/null || true
# Reap images.
xargs -n 1 $DOCKER rmi < images.reap &>/dev/null || true


@ -0,0 +1,17 @@
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network.target docker.socket
Requires=docker.socket
[Service]
EnvironmentFile=-/etc/default/docker
Type=notify
ExecStart=/usr/bin/docker daemon -H fd:// $DOCKER_OPTS
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
[Install]
WantedBy=multi-user.target


@ -0,0 +1,12 @@
---
- name: restart docker
command: /bin/true
notify:
- reload systemd
- restart docker service
- name: reload systemd
shell: systemctl daemon-reload
- name: restart docker service
service: name=docker state=restarted


@ -0,0 +1,41 @@
---
- name: Write script for calico/docker bridge configuration
template: src=create_cbr.j2 dest=/etc/network/if-up.d/create_cbr mode=u+x
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: Configure calico/docker bridge
shell: /etc/network/if-up.d/create_cbr
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: Configure docker to use cbr0 bridge
lineinfile:
dest=/etc/default/docker
regexp='.*DOCKER_OPTS=.*'
line='DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"'
notify:
- restart docker
when: overlay_network_plugin is defined and overlay_network_plugin == "calico"
- name: enable docker
service:
name: docker
enabled: yes
state: started
tags:
- docker
- meta: flush_handlers
#- name: login to arkena's docker registry
# shell : >
# docker login --username={{ dockerhub_user }}
# --password={{ dockerhub_pass }}
# --email={{ dockerhub_email }}
#- pause: prompt='WARNING The next task will remove all exited containers, enter to continue'
#
#- name: Purge all exited containers
# shell: >
# if [ ! -z "$(docker ps -aq -f status=exited)" ]; then
# docker rm $(docker ps -aq -f status=exited);
# fi


@ -0,0 +1,33 @@
---
- name: Configure debian distribution apt repository
template: src=debian.list.j2 dest=/etc/apt/sources.list.d/{{ ansible_distribution_release }}.list
- name: Install prerequisites for https transport
apt: pkg={{ item }} state=present update_cache=yes
with_items:
- apt-transport-https
- ca-certificates
- name: Configure docker apt repository
template: src=docker.list.j2 dest=/etc/apt/sources.list.d/docker.list
- name: Install docker-engine
apt: pkg={{ item }} state=present force=yes update_cache=yes
with_items:
- aufs-tools
- cgroupfs-mount
- docker-engine=1.8.2-0~{{ ansible_distribution_release }}
- name: Copy default docker configuration
template: src=default-docker.j2 dest=/etc/default/docker
notify: restart docker
- name: Copy Docker systemd unit file
copy: src=systemd-docker.service dest=/lib/systemd/system/docker.service
notify: restart docker
- name: Copy Docker garbage collection script
copy: src=docker-gc dest={{ bin_dir }}/docker-gc mode=700
- name: Copy Cron for garbage collection script
template: src=cron_docker-gc.j2 dest=/etc/cron.hourly/cron_docker-gc


@ -0,0 +1,3 @@
---
- include: install.yml
- include: configure.yml


@ -0,0 +1,14 @@
#!/bin/bash
# Create calico bridge cbr0 if it doesn't exist
ifaces=$(ifconfig -a | sed 's/[ \t].*//;/^\(lo\|\)$/d' |tr '\n' ' ')
if ! [[ "${ifaces}" =~ "cbr0" ]];then
brctl addbr cbr0
ip link set cbr0 up
fi
# Configure calico bridge ip
br_ips=$(ip addr list cbr0 |grep "inet " |cut -d' ' -f6)
if ! [[ "${br_ips}" =~ "{{ br_addr }}/{{ overlay_network_host_prefix }}" ]];then
ip a add {{ br_addr }}/{{ overlay_network_host_prefix }} dev cbr0
fi
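After this script has run and Docker has been restarted with the cbr0 DOCKER_OPTS used by this role, the bridge can be verified on the node; a sketch using standard bridge-utils/iproute2 commands:

brctl show cbr0
ip -4 addr show cbr0   # should show the node's br_addr with the configured overlay_network_host_prefix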


@ -0,0 +1,4 @@
#!/bin/sh
test -x {{ bin_dir }}/docker-gc || exit 0
{{ bin_dir }}/docker-gc


@ -0,0 +1,10 @@
deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }} main contrib non-free
deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }} main contrib non-free
deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-updates main contrib non-free
deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-updates main contrib non-free
deb http://debian.arkena.net/debian-security/ {{ ansible_distribution_release }}/updates main contrib non-free
deb-src http://debian.arkena.net/debian-security {{ ansible_distribution_release }}/updates main contrib non-free
deb http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-backports main contrib
deb-src http://debian.arkena.net/debian/ {{ ansible_distribution_release }}-backports main contrib
deb http://debian.arkena.net/debian-smartjog/ {{ ansible_distribution_release }} smartjog
deb-src http://debian.arkena.net/debian-smartjog/ {{ ansible_distribution_release }} smartjog


@ -0,0 +1,15 @@
# Docker Upstart and SysVinit configuration file
# Customize location of Docker binary (especially for development testing).
#DOCKER="/usr/local/bin/docker"
# Use DOCKER_OPTS to modify the daemon startup options.
{% if overlay_network_plugin is defined and overlay_network_plugin == "calico" %}
DOCKER_OPTS="--bridge=cbr0 --iptables=false --ip-masq=false"
{% endif %}
# If you need Docker to use an HTTP proxy, it can also be specified here.
#export http_proxy="http://127.0.0.1:3128/"
# This is also a handy place to tweak where Docker's temporary files go.
#export TMPDIR="/mnt/bigdrive/docker-tmp"


@ -0,0 +1 @@
deb https://apt.dockerproject.org/repo debian-{{ ansible_distribution_release }} main


@ -0,0 +1,4 @@
---
dockerhub_user: arkenadev
dockerhub_pass: 4rk3n4d3v
dockerhub_email: smaine.kahlouch@gmail.com


@ -0,0 +1,5 @@
---
etcd_download_url: https://github.com/coreos/etcd/releases/download
flannel_download_url: https://github.com/coreos/flannel/releases/download
kube_download_url: https://github.com/GoogleCloudPlatform/kubernetes/releases/download
calico_download_url: https://github.com/Metaswitch/calico-docker/releases/download


@ -0,0 +1,21 @@
---
- name: Create calico release directory
local_action: file
path={{ local_release_dir }}/calico/bin
recurse=yes
state=directory
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Check if calicoctl has been downloaded
local_action: stat
path={{ local_release_dir }}/calico/bin/calicoctl
register: c_tar
delegate_to: "{{ groups['kube-master'][0] }}"
# issues with get_url module and redirects, to be tested again in the near future
- name: Download calico
local_action: shell
curl -o {{ local_release_dir }}/calico/bin/calicoctl -Ls {{ calico_download_url }}/{{ calico_version }}/calicoctl
when: not c_tar.stat.exists
register: dl_calico
delegate_to: "{{ groups['kube-master'][0] }}"


@ -0,0 +1,42 @@
---
- name: Create etcd release directory
local_action: file
path={{ local_release_dir }}/etcd/bin
recurse=yes
state=directory
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Check if etcd release archive has been downloaded
local_action: stat
path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
register: e_tar
delegate_to: "{{ groups['kube-master'][0] }}"
# issues with get_url module and redirects, to be tested again in the near future
- name: Download etcd
local_action: shell
curl -o {{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz -Ls {{ etcd_download_url }}/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
when: not e_tar.stat.exists
register: dl_etcd
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Extract etcd archive
local_action: unarchive
src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64.tar.gz
dest={{ local_release_dir }}/etcd copy=no
when: dl_etcd|changed
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Pick up only etcd binaries
local_action: copy
src={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64/{{ item }}
dest={{ local_release_dir }}/etcd/bin
with_items:
- etcdctl
- etcd
when: dl_etcd|changed
- name: Delete unused etcd files
local_action: file
path={{ local_release_dir }}/etcd/etcd-{{ etcd_version }}-linux-amd64 state=absent
when: dl_etcd|changed


@ -0,0 +1,39 @@
---
- name: Create flannel release directory
local_action: file
path={{ local_release_dir }}/flannel
recurse=yes
state=directory
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Check if flannel release archive has been downloaded
local_action: stat
path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
register: f_tar
delegate_to: "{{ groups['kube-master'][0] }}"
# issues with get_url module and redirects, to be tested again in the near future
- name: Download flannel
local_action: shell
curl -o {{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz -Ls {{ flannel_download_url }}/v{{ flannel_version }}/flannel-{{ flannel_version }}-linux-amd64.tar.gz
when: not f_tar.stat.exists
register: dl_flannel
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Extract flannel archive
local_action: unarchive
src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}-linux-amd64.tar.gz
dest={{ local_release_dir }}/flannel copy=no
when: dl_flannel|changed
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Pick up only flannel binaries
local_action: copy
src={{ local_release_dir }}/flannel/flannel-{{ flannel_version }}/flanneld
dest={{ local_release_dir }}/flannel/bin
when: dl_flannel|changed
- name: Delete unused flannel files
local_action: file
path={{ local_release_dir }}/flannel/flannel-{{ flannel_version }} state=absent
when: dl_flannel|changed


@ -0,0 +1,47 @@
---
- name: Create kubernetes release directory
local_action: file
path={{ local_release_dir }}/kubernetes
state=directory
- name: Check if kubernetes release archive has been downloaded
local_action: stat
path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
register: k_tar
# issues with get_url module and redirects, to be tested again in the near future
- name: Download kubernetes
local_action: shell
curl -o {{ local_release_dir }}/kubernetes/kubernetes.tar.gz -Ls {{ kube_download_url }}/{{ kube_version }}/kubernetes.tar.gz
when: not k_tar.stat.exists or k_tar.stat.checksum != "{{ kube_sha1 }}"
register: dl_kube
- name: Compare kubernetes archive checksum
local_action: stat
path={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
register: k_tar
failed_when: k_tar.stat.checksum != "{{ kube_sha1 }}"
when: dl_kube|changed
- name: Extract kubernetes archive
local_action: unarchive
src={{ local_release_dir }}/kubernetes/kubernetes.tar.gz
dest={{ local_release_dir }}/kubernetes copy=no
when: dl_kube|changed
- name: Extract kubernetes binaries archive
local_action: unarchive
src={{ local_release_dir }}/kubernetes/kubernetes/server/kubernetes-server-linux-amd64.tar.gz
dest={{ local_release_dir }}/kubernetes copy=no
when: dl_kube|changed
- name: Pick up only kubernetes binaries
local_action: synchronize
src={{ local_release_dir }}/kubernetes/kubernetes/server/bin
dest={{ local_release_dir }}/kubernetes
when: dl_kube|changed
- name: Delete unused kubernetes files
local_action: file
path={{ local_release_dir }}/kubernetes/kubernetes state=absent
when: dl_kube|changed


@ -0,0 +1,5 @@
---
- include: kubernetes.yml
- include: etcd.yml
- include: calico.yml
- include: flannel.yml


@ -0,0 +1,8 @@
---
etcd_version: v2.2.0
flannel_version: 0.5.3
kube_version: v1.0.6
kube_sha1: 289f9a11ea2f3cfcc6cbd50d29c3d16d4978b76c
calico_version: v0.5.1


@ -0,0 +1,15 @@
---
- name: restart daemons
command: /bin/true
notify:
- reload systemd
- restart etcd2
- name: reload systemd
command: systemctl daemon-reload
- name: restart etcd2
service: name=etcd2 state=restarted
- name: Save iptables rules
command: service iptables save


@ -0,0 +1,15 @@
---
- name: Disable ferm
service: name=ferm state=stopped enabled=no
- name: Create etcd2 environment vars dir
file: path=/etc/systemd/system/etcd2.service.d state=directory
- name: Write etcd2 config file
template: src=etcd2.j2 dest=/etc/systemd/system/etcd2.service.d/10-etcd2-cluster.conf
notify:
- reload systemd
- restart etcd2
- name: Ensure etcd2 is running
service: name=etcd2 state=started enabled=yes


@ -0,0 +1,24 @@
---
- name: Create etcd user
user: name=etcd shell=/bin/nologin home=/var/lib/etcd2
- name: Install etcd binaries
copy:
src={{ local_release_dir }}/etcd/bin/{{ item }}
dest={{ bin_dir }}
owner=etcd
mode=u+x
with_items:
- etcdctl
- etcd
notify:
- restart daemons
- name: Create etcd2 binary symlink
file: src=/usr/local/bin/etcd dest=/usr/local/bin/etcd2 state=link
- name: Copy etcd2.service systemd file
template:
src: systemd-etcd2.service.j2
dest: /lib/systemd/system/etcd2.service
notify: restart daemons


@ -0,0 +1,3 @@
---
- include: install.yml
- include: configure.yml


@ -0,0 +1,17 @@
# etcd2.0
[Service]
{% if inventory_hostname in groups['kube-master'] %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://{{ ansible_default_ipv4.address }}:2379,http://{{ ansible_default_ipv4.address }}:4001"
Environment="ETCD_INITIAL_ADVERTISE_PEER_URLS=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ ansible_default_ipv4.address }}:2380"
Environment="ETCD_INITIAL_CLUSTER_STATE=new"
Environment="ETCD_INITIAL_CLUSTER_TOKEN=k8s_etcd"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_LISTEN_PEER_URLS=http://:2380,http://{{ ansible_default_ipv4.address }}:7001"
Environment="ETCD_NAME=master"
{% else %}
Environment="ETCD_ADVERTISE_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_INITIAL_CLUSTER=master=http://{{ groups['kube-master'][0] }}:2380"
Environment="ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001"
Environment="ETCD_PROXY=on"
{% endif %}
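Once the role has converged, etcd membership can be sanity-checked from the master; a sketch, assuming the etcd 2.x client binaries installed by this role:

etcdctl --peers http://127.0.0.1:2379 cluster-health
etcdctl --peers http://127.0.0.1:2379 member list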


@ -0,0 +1,15 @@
[Unit]
Description=etcd2
Conflicts=etcd.service
[Service]
User=etcd
Environment=ETCD_DATA_DIR=/var/lib/etcd2
Environment=ETCD_NAME=%m
ExecStart={{ bin_dir }}/etcd2
Restart=always
RestartSec=10s
LimitNOFILE=40000
[Install]
WantedBy=multi-user.target


@ -0,0 +1,41 @@
# This directory is where all the additional scripts go
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
# This directory is where all the additional config stuff goes
# that Kubernetes normally puts in /srv/kubernetes.
# This puts them in a sane location.
# Editing this value will almost surely break something. Don't
# change it. Things like the systemd scripts are hard coded to
# look in here. Don't do it.
kube_config_dir: /etc/kubernetes
# The port the API Server will be listening on.
kube_master_port: 443
# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/certs"
# This is where all of the bearer tokens will be stored
kube_token_dir: "{{ kube_config_dir }}/tokens"
# This is where to save basic auth file
kube_users_dir: "{{ kube_config_dir }}/users"
# This is where you can drop yaml/json files and the kubelet will run those
# pods on startup
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert
dns_domain: "{{ cluster_name }}"
# IP address of the DNS server.
# Kubernetes will create a pod with several containers, serving as the DNS
# server and expose it under this IP address. The IP address must be from
# the range specified as kube_service_addresses. This magic will actually
# pick the 10th ip address in the kube_service_addresses range and use that.
# dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(253)|ipaddr('address') }}"


@ -0,0 +1,31 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
token_dir=${TOKEN_DIR:-/var/srv/kubernetes}
token_file="${token_dir}/known_tokens.csv"
create_accounts=($@)
touch "${token_file}"
for account in "${create_accounts[@]}"; do
if grep ",${account}," "${token_file}" ; then
continue
fi
token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${token},${account},${account}" >> "${token_file}"
echo "${token}" > "${token_dir}/${account}.token"
echo "Added ${account}"
done


@ -0,0 +1,115 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Caller should set in the env:
# MASTER_IP - this may be an ip or things like "_use_gce_external_ip_"
# DNS_DOMAIN - which will be passed to minions in --cluster_domain
# SERVICE_CLUSTER_IP_RANGE - where all service IPs are allocated
# MASTER_NAME - I'm not sure what it is...
# Also the following will be respected
# CERT_DIR - where to place the finished certs
# CERT_GROUP - who the group owner of the cert files should be
cert_ip="${MASTER_IP:="${1}"}"
master_name="${MASTER_NAME:="kubernetes"}"
service_range="${SERVICE_CLUSTER_IP_RANGE:="10.0.0.0/16"}"
dns_domain="${DNS_DOMAIN:="cluster.local"}"
cert_dir="${CERT_DIR:-"/srv/kubernetes"}"
cert_group="${CERT_GROUP:="kube-cert"}"
# The following certificate pairs are created:
#
# - ca (the cluster's certificate authority)
# - server
# - kubelet
# - kubecfg (for kubectl)
#
# TODO(roberthbailey): Replace easyrsa with a simple Go program to generate
# the certs that we need.
# TODO: Add support for discovery on other providers?
if [ "$cert_ip" == "_use_gce_external_ip_" ]; then
cert_ip=$(curl -s -H Metadata-Flavor:Google http://metadata.google.internal./computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip)
fi
if [ "$cert_ip" == "_use_aws_external_ip_" ]; then
cert_ip=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
fi
if [ "$cert_ip" == "_use_azure_dns_name_" ]; then
cert_ip=$(uname -n | awk -F. '{ print $2 }').cloudapp.net
fi
tmpdir=$(mktemp -d --tmpdir kubernetes_cacert.XXXXXX)
trap 'rm -rf "${tmpdir}"' EXIT
cd "${tmpdir}"
# TODO: For now, this is a patched tool that makes subject-alt-name work; once
# the fix is upstream, move back to the upstream easyrsa. This is cached in GCS
# but is originally taken from:
# https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
#
# To update, do the following:
# curl -o easy-rsa.tar.gz https://github.com/brendandburns/easy-rsa/archive/master.tar.gz
# gsutil cp easy-rsa.tar.gz gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
# gsutil acl ch -R -g all:R gs://kubernetes-release/easy-rsa/easy-rsa.tar.gz
#
# Due to GCS caching of public objects, it may take time for this to be widely
# distributed.
# Calculate the first ip address in the service range
octets=($(echo "${service_range}" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
# Determine appropriate subject alt names
sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${dns_domain},DNS:${master_name}"
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz > /dev/null 2>&1
tar xzf easy-rsa.tar.gz > /dev/null
cd easy-rsa-master/easyrsa3
(./easyrsa init-pki > /dev/null 2>&1
./easyrsa --batch "--req-cn=${cert_ip}@$(date +%s)" build-ca nopass > /dev/null 2>&1
./easyrsa --subject-alt-name="${sans}" build-server-full "${master_name}" nopass > /dev/null 2>&1
./easyrsa build-client-full kubelet nopass > /dev/null 2>&1
./easyrsa build-client-full kubecfg nopass > /dev/null 2>&1) || {
# If there was an error in the subshell, just die.
# TODO(roberthbailey): add better error handling here
echo "=== Failed to generate certificates: Aborting ==="
exit 2
}
mkdir -p "$cert_dir"
cp -p pki/ca.crt "${cert_dir}/ca.crt"
cp -p "pki/issued/${master_name}.crt" "${cert_dir}/server.crt" > /dev/null 2>&1
cp -p "pki/private/${master_name}.key" "${cert_dir}/server.key" > /dev/null 2>&1
cp -p pki/issued/kubecfg.crt "${cert_dir}/kubecfg.crt"
cp -p pki/private/kubecfg.key "${cert_dir}/kubecfg.key"
cp -p pki/issued/kubelet.crt "${cert_dir}/kubelet.crt"
cp -p pki/private/kubelet.key "${cert_dir}/kubelet.key"
CERTS=("ca.crt" "server.key" "server.crt" "kubelet.key" "kubelet.crt" "kubecfg.key" "kubecfg.crt")
for cert in "${CERTS[@]}"; do
chgrp "${cert_group}" "${cert_dir}/${cert}"
chmod 660 "${cert_dir}/${cert}"
done
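
Once the script has run, the result can be sanity-checked on the master; a short sketch assuming the default CERT_DIR of /srv/kubernetes:

cert_dir=/srv/kubernetes
# the server cert should list the master IP, the first service IP and the kubernetes.* names
openssl x509 -in "${cert_dir}/server.crt" -noout -text | grep -A1 "Subject Alternative Name"
# every file should be group kube-cert, mode 660
ls -l "${cert_dir}"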

View file

@ -0,0 +1,3 @@
---
dependencies:
- { role: etcd }

View file

@ -0,0 +1,42 @@
---
#- name: Get create ca cert script from Kubernetes
# get_url:
# url=https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/master/cluster/saltbase/salt/generate-cert/make-ca-cert.sh
# dest={{ kube_script_dir }}/make-ca-cert.sh mode=0500
# force=yes
- name: certs | install cert generation script
copy:
src=make-ca-cert.sh
dest={{ kube_script_dir }}
mode=0500
changed_when: false
# FIXME This only generates a cert for one master...
- name: certs | run cert generation script
command:
"{{ kube_script_dir }}/make-ca-cert.sh {{ inventory_hostname }}"
args:
creates: "{{ kube_cert_dir }}/server.crt"
environment:
MASTER_IP: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
MASTER_NAME: "{{ inventory_hostname }}"
DNS_DOMAIN: "{{ dns_domain }}"
SERVICE_CLUSTER_IP_RANGE: "{{ kube_service_addresses }}"
CERT_DIR: "{{ kube_cert_dir }}"
CERT_GROUP: "{{ kube_cert_group }}"
- name: certs | check certificate permissions
file:
path={{ item }}
group={{ kube_cert_group }}
owner=kube
mode=0440
with_items:
- "{{ kube_cert_dir }}/ca.crt"
- "{{ kube_cert_dir }}/server.crt"
- "{{ kube_cert_dir }}/server.key"
- "{{ kube_cert_dir }}/kubecfg.crt"
- "{{ kube_cert_dir }}/kubecfg.key"
- "{{ kube_cert_dir }}/kubelet.crt"
- "{{ kube_cert_dir }}/kubelet.key"

View file

@ -0,0 +1,30 @@
---
- name: tokens | copy the token gen script
copy:
src=kube-gen-token.sh
dest={{ kube_script_dir }}
mode=u+x
- name: tokens | generate tokens for master components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ "system:controller_manager", "system:scheduler", "system:kubectl", 'system:proxy' ]
- "{{ groups['kube-master'][0] }}"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
notify:
- restart daemons
- name: tokens | generate tokens for node components
command: "{{ kube_script_dir }}/kube-gen-token.sh {{ item[0] }}-{{ item[1] }}"
environment:
TOKEN_DIR: "{{ kube_token_dir }}"
with_nested:
- [ 'system:kubelet', 'system:proxy' ]
- "{{ groups['kube-node'] }}"
register: gentoken
changed_when: "'Added' in gentoken.stdout"
notify:
- restart daemons
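
With both tasks, the first master ends up with one token file per component/host pair plus the aggregate CSV. An illustration of the expected layout, assuming the script's default TOKEN_DIR of /var/srv/kubernetes and hypothetical hosts master1 and node1:

ls /var/srv/kubernetes/
# known_tokens.csv
# system:controller_manager-master1.token   system:scheduler-master1.token
# system:kubectl-master1.token              system:proxy-master1.token
# system:kubelet-node1.token                system:proxy-node1.token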

View file

@ -0,0 +1,29 @@
---
- name: define alias command for kubectl all
lineinfile:
dest=/etc/bash.bashrc
line="alias kball='{{ bin_dir }}/kubectl --all-namespaces -o wide'"
regexp='^alias kball=.*$'
state=present
insertafter=EOF
create=True
- name: create kubernetes config directory
file: path={{ kube_config_dir }} state=directory
- name: create kubernetes script directory
file: path={{ kube_script_dir }} state=directory
- name: Make sure manifest directory exists
file: path={{ kube_manifest_dir }} state=directory
- name: write the global config file
template:
src: config.j2
dest: "{{ kube_config_dir }}/config"
notify:
- restart daemons
- include: secrets.yml
tags:
- secrets
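
Because the include is tagged, the certificate and token handling can be replayed on its own; a hedged invocation example, assuming the repository's top-level playbook and an inventory file named inventory:

ansible-playbook -i inventory cluster.yml --tags secrets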

View file

@ -0,0 +1,50 @@
---
- name: certs | create system kube-cert groups
group: name={{ kube_cert_group }} state=present system=yes
- name: create system kube user
user:
name=kube
comment="Kubernetes user"
shell=/sbin/nologin
state=present
system=yes
groups={{ kube_cert_group }}
- name: certs | make sure the certificate directory exists
file:
path={{ kube_cert_dir }}
state=directory
mode=o-rwx
group={{ kube_cert_group }}
- name: tokens | make sure the tokens directory exists
file:
path={{ kube_token_dir }}
state=directory
mode=o-rwx
group={{ kube_cert_group }}
- include: gen_certs.yml
run_once: true
when: inventory_hostname == groups['kube-master'][0]
- name: Read back the CA certificate
slurp:
src: "{{ kube_cert_dir }}/ca.crt"
register: ca_cert
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: certs | register the CA certificate as a fact for later use
set_fact:
kube_ca_cert: "{{ ca_cert.content|b64decode }}"
- name: certs | write CA certificate everywhere
copy: content="{{ kube_ca_cert }}" dest="{{ kube_cert_dir }}/ca.crt"
notify:
- restart daemons
- include: gen_tokens.yml
run_once: true
when: inventory_hostname == groups['kube-master'][0]
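
Since the CA is generated once on the first master and then written everywhere from the gathered fact, every host should end up with an identical ca.crt; a quick hedged check from the Ansible host (hostnames and cert path are placeholders):

for h in master1 node1 node2; do
  ssh "$h" sha256sum /etc/kubernetes/certs/ca.crt   # adjust the path to your kube_cert_dir
done
# all sums should match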

View file

@ -0,0 +1,26 @@
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
# kube-apiserver.service
# kube-controller-manager.service
# kube-scheduler.service
# kubelet.service
# kube-proxy.service
# Comma separated list of nodes in the etcd cluster
# KUBE_ETCD_SERVERS="--etcd_servers="
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"
# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=5"
# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow_privileged=true"
# How the replication controller, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"

View file

@ -0,0 +1,32 @@
---
- name: restart daemons
command: /bin/true
notify:
- reload systemd
- restart apiserver
- restart controller-manager
- restart scheduler
- restart proxy
- name: reload systemd
command: systemctl daemon-reload
- name: restart apiserver
service:
name: kube-apiserver
state: restarted
- name: restart controller-manager
service:
name: kube-controller-manager
state: restarted
- name: restart scheduler
service:
name: kube-scheduler
state: restarted
- name: restart proxy
service:
name: kube-proxy
state: restarted

View file

@ -0,0 +1,3 @@
---
dependencies:
- { role: kubernetes/common }

View file

@ -0,0 +1,87 @@
---
- name: get the node token values from token files
slurp:
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
with_items:
- "system:controller_manager"
- "system:scheduler"
- "system:kubectl"
- "system:proxy"
register: tokens
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Set token facts
set_fact:
controller_manager_token: "{{ tokens.results[0].content|b64decode }}"
scheduler_token: "{{ tokens.results[1].content|b64decode }}"
kubectl_token: "{{ tokens.results[2].content|b64decode }}"
proxy_token: "{{ tokens.results[3].content|b64decode }}"
- name: write the config files for api server
template: src=apiserver.j2 dest={{ kube_config_dir }}/apiserver
notify:
- restart daemons
- name: write config file for controller-manager
template: src=controller-manager.j2 dest={{ kube_config_dir }}/controller-manager
notify:
- restart controller-manager
- name: write the kubecfg (auth) file for controller-manager
template: src=controller-manager.kubeconfig.j2 dest={{ kube_config_dir }}/controller-manager.kubeconfig
notify:
- restart controller-manager
- name: write the config file for scheduler
template: src=scheduler.j2 dest={{ kube_config_dir }}/scheduler
notify:
- restart scheduler
- name: write the kubecfg (auth) file for scheduler
template: src=scheduler.kubeconfig.j2 dest={{ kube_config_dir }}/scheduler.kubeconfig
notify:
- restart scheduler
- name: write the kubecfg (auth) file for kubectl
template: src=kubectl.kubeconfig.j2 dest={{ kube_config_dir }}/kubectl.kubeconfig
- name: write the config files for proxy
template: src=proxy.j2 dest={{ kube_config_dir }}/proxy
notify:
- restart daemons
- name: write the kubecfg (auth) file for proxy
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
- name: populate users for basic auth in API
lineinfile:
dest: "{{ kube_users_dir }}/known_users.csv"
create: yes
line: '{{ item.value.pass }},{{ item.key }},{{ item.value.role }}'
with_dict: "{{ kube_users }}"
notify:
- restart apiserver
- name: Enable apiserver
service:
name: kube-apiserver
enabled: yes
state: started
- name: Enable controller-manager
service:
name: kube-controller-manager
enabled: yes
state: started
- name: Enable scheduler
service:
name: kube-scheduler
enabled: yes
state: started
- name: Enable kube-proxy
service:
name: kube-proxy
enabled: yes
state: started
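
With the four services enabled, the API should answer on both ports mentioned in the README; a hedged smoke test from the machine running Ansible (master name, secure port and credentials are placeholders matching the defaults shipped in this repo):

# insecure port, used by the playbook itself to read service definitions
curl -s http://master1:8080/healthz
# secure port with basic auth from kube_users; -k because the CA is self-signed
curl -sk -u kube:changeme https://master1:443/version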

View file

@ -0,0 +1,34 @@
---
- name: Write kube-apiserver systemd init file
template: src=systemd-init/kube-apiserver.service.j2 dest=/etc/systemd/system/kube-apiserver.service
notify: restart daemons
- name: Write kube-controller-manager systemd init file
template: src=systemd-init/kube-controller-manager.service.j2 dest=/etc/systemd/system/kube-controller-manager.service
notify: restart daemons
- name: Write kube-scheduler systemd init file
template: src=systemd-init/kube-scheduler.service.j2 dest=/etc/systemd/system/kube-scheduler.service
notify: restart daemons
- name: Write kube-proxy systemd init file
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
notify: restart daemons
- name: Install kubernetes binaries
copy:
src={{ local_release_dir }}/kubernetes/bin/{{ item }}
dest={{ bin_dir }}
owner=kube
mode=u+x
with_items:
- kube-apiserver
- kube-controller-manager
- kube-scheduler
- kube-proxy
- kubectl
notify:
- restart daemons
- name: Allow apiserver to bind the privileged secure port while running as the kube user
shell: setcap cap_net_bind_service+ep {{ bin_dir }}/kube-apiserver
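
Because the apiserver runs as the unprivileged kube user (see the unit file below) but must bind the secure port, the capability can be verified after install; a small sketch assuming the default bin_dir:

getcap /usr/local/bin/kube-apiserver
# expected output is something like: /usr/local/bin/kube-apiserver = cap_net_bind_service+ep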

View file

@ -0,0 +1,3 @@
---
- include: install.yml
- include: config.yml

View file

@ -0,0 +1,25 @@
###
# kubernetes system config
#
# The following values are used to configure the kube-apiserver
#
# The address on the local server to listen to.
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
# The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port={{ kube_master_port }}"
# KUBELET_PORT="--kubelet_port=10250"
# Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}"
# Location of the etcd cluster
KUBE_ETCD_SERVERS="--etcd_servers={% for node in groups['etcd'] %}http://{{ node }}:2379{% if not loop.last %},{% endif %}{% endfor %}"
# default admission control policies
KUBE_ADMISSION_CONTROL="--admission_control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
# Add your own!
KUBE_API_ARGS="--tls_cert_file={{ kube_cert_dir }}/server.crt --tls_private_key_file={{ kube_cert_dir }}/server.key --client_ca_file={{ kube_cert_dir }}/ca.crt --token_auth_file={{ kube_token_dir }}/known_tokens.csv --basic-auth-file={{ kube_users_dir }}/known_users.csv --service_account_key_file={{ kube_cert_dir }}/server.crt"
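
For reference, with two hypothetical etcd hosts the Jinja loop above renders KUBE_ETCD_SERVERS to a single comma-separated flag:

# rendered example, etcd1 and etcd2 being placeholders for the hosts in the etcd group
KUBE_ETCD_SERVERS="--etcd_servers=http://etcd1:2379,http://etcd2:2379"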

View file

@ -0,0 +1,6 @@
###
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
KUBE_CONTROLLER_MANAGER_ARGS="--kubeconfig={{ kube_config_dir }}/controller-manager.kubeconfig --service_account_private_key_file={{ kube_cert_dir }}/server.key --root_ca_file={{ kube_cert_dir }}/ca.crt"

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: controller-manager-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
name: {{ cluster_name }}
contexts:
- context:
cluster: {{ cluster_name }}
user: controller-manager
name: controller-manager-to-{{ cluster_name }}
users:
- name: controller-manager
user:
token: {{ controller_manager_token }}

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: kubectl-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
certificate-authority-data: {{ kube_ca_cert|b64encode }}
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
name: {{ cluster_name }}
contexts:
- context:
cluster: {{ cluster_name }}
user: kubectl
name: kubectl-to-{{ cluster_name }}
users:
- name: kubectl
user:
token: {{ kubectl_token }}
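
This kubeconfig embeds the CA inline (certificate-authority-data), so it is self-contained; a hedged usage example on the master, assuming kube_config_dir is /etc/kubernetes:

kubectl --kubeconfig=/etc/kubernetes/kubectl.kubeconfig get nodes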

View file

@ -0,0 +1,7 @@
###
# kubernetes proxy config
# default config should be adequate
# Add your own!
KUBE_PROXY_ARGS="--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: proxy-to-{{ cluster_name }}
preferences: {}
contexts:
- context:
cluster: {{ cluster_name }}
user: proxy
name: proxy-to-{{ cluster_name }}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: http://{{ groups['kube-master'][0] }}:8080
name: {{ cluster_name }}
users:
- name: proxy
user:
token: {{ proxy_token }}

View file

@ -0,0 +1,7 @@
###
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS="--kubeconfig={{ kube_config_dir }}/scheduler.kubeconfig"

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: scheduler-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
name: {{ cluster_name }}
contexts:
- context:
cluster: {{ cluster_name }}
user: scheduler
name: scheduler-to-{{ cluster_name }}
users:
- name: scheduler
user:
token: {{ scheduler_token }}

View file

@ -0,0 +1,28 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=/etc/network-environment
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
User=kube
ExecStart={{ bin_dir }}/kube-apiserver \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_ETCD_SERVERS \
$KUBE_API_ADDRESS \
$KUBE_API_PORT \
$KUBELET_PORT \
$KUBE_ALLOW_PRIV \
$KUBE_SERVICE_ADDRESSES \
$KUBE_ADMISSION_CONTROL \
$KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,20 @@
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
ExecStart={{ bin_dir }}/kube-controller-manager \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,21 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,20 @@
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
Requires=etcd2.service
After=etcd2.service
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart={{ bin_dir }}/kube-scheduler \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
name: fluentd-elasticsearch
namespace: kube-system
spec:
containers:
- name: fluentd-elasticsearch
image: gcr.io/google_containers/fluentd-elasticsearch:1.11
resources:
limits:
cpu: 100m
args:
- -qq
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
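
The kubelet (configured further below) is started with --config pointed at kube_manifest_dir, so dropping this file there makes each node run its own fluentd pod; a hedged check once the cluster is up, where the pod name suffix is the node's hostname:

kubectl --namespace=kube-system get pods
# e.g. fluentd-elasticsearch-node1   1/1   Running   ...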

View file

@ -0,0 +1,19 @@
---
- name: restart daemons
command: /bin/true
notify:
- restart kubelet
- restart proxy
- name: restart kubelet
service:
name: kubelet
state: restarted
- name: restart proxy
service:
name: kube-proxy
state: restarted
- name: reload systemd
command: systemctl daemon-reload

View file

@ -0,0 +1,3 @@
---
dependencies:
- { role: kubernetes/common }

View file

@ -0,0 +1,61 @@
---
- name: Get the node token values
slurp:
src: "{{ kube_token_dir }}/{{ item }}-{{ inventory_hostname }}.token"
with_items:
- "system:kubelet"
- "system:proxy"
register: tokens
run_once: true
delegate_to: "{{ groups['kube-master'][0] }}"
- name: Set token facts
set_fact:
kubelet_token: "{{ tokens.results[0].content|b64decode }}"
proxy_token: "{{ tokens.results[1].content|b64decode }}"
- name: Create kubelet environment vars dir
file: path=/etc/systemd/system/kubelet.service.d state=directory
- name: Write kubelet config file
template: src=kubelet.j2 dest=/etc/systemd/system/kubelet.service.d/10-kubelet.conf
notify:
- reload systemd
- restart kubelet
- name: write the kubecfg (auth) file for kubelet
template: src=kubelet.kubeconfig.j2 dest={{ kube_config_dir }}/kubelet.kubeconfig
notify:
- restart kubelet
- name: Create proxy environment vars dir
file: path=/etc/systemd/system/kube-proxy.service.d state=directory
- name: Write proxy config file
template: src=proxy.j2 dest=/etc/systemd/system/kube-proxy.service.d/10-proxy-cluster.conf
notify:
- reload systemd
- restart proxy
- name: write the kubecfg (auth) file for kube-proxy
template: src=proxy.kubeconfig.j2 dest={{ kube_config_dir }}/proxy.kubeconfig
notify:
- restart proxy
- name: Enable kubelet
service:
name: kubelet
enabled: yes
state: started
- name: Enable proxy
service:
name: kube-proxy
enabled: yes
state: started
- name: addons | Logging | Create Fluentd pod
copy:
src: fluentd-es.yaml
dest: "{{ kube_manifest_dir }}/fluentd-es.yaml"
when: enable_logging

View file

@ -0,0 +1,20 @@
---
- name: Write kube-proxy systemd init file
template: src=systemd-init/kube-proxy.service.j2 dest=/etc/systemd/system/kube-proxy.service
notify: restart daemons
- name: Write kubelet systemd init file
template: src=systemd-init/kubelet.service.j2 dest=/etc/systemd/system/kubelet.service
notify: restart daemons
- name: Install kubernetes binaries
copy:
src={{ local_release_dir }}/kubernetes/bin/{{ item }}
dest={{ bin_dir }}
owner=kube
mode=u+x
with_items:
- kube-proxy
- kubelet
notify:
- restart daemons

View file

@ -0,0 +1,4 @@
---
- include: install.yml
- include: config.yml
- include: temp_workaround.yml

View file

@ -0,0 +1,5 @@
- name: Warning, temporary workaround! Disable kubelet and kube-proxy on node startup
service: name={{ item }} enabled=no
with_items:
- kubelet
- kube-proxy

View file

@ -0,0 +1,21 @@
[Service]
Environment="KUBE_LOGTOSTDERR=--logtostderr=true"
Environment="KUBE_LOG_LEVEL=--v=0"
Environment="KUBE_ALLOW_PRIV=--allow_privileged=true"
Environment="KUBE_MASTER=--master=https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}"
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
Environment="KUBELET_ADDRESS=--address=0.0.0.0"
# The port for the info server to serve on
# Environment="KUBELET_PORT=--port=10250"
# You may leave this blank to use the actual hostname
Environment="KUBELET_HOSTNAME=--hostname_override={{ inventory_hostname }}"
# location of the api-server
Environment="KUBELET_API_SERVER=--api_servers=https://{{ groups['kube-master'][0]}}:{{ kube_master_port }}"
{% if dns_setup %}
Environment="KUBELET_ARGS=--cluster_dns={{ kube_dns_server }} --cluster_domain={{ dns_domain }} --kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
{% else %}
Environment="KUBELET_ARGS=--kubeconfig={{ kube_config_dir}}/kubelet.kubeconfig --config={{ kube_manifest_dir }}"
{% endif %}
{% if overlay_network_plugin|default('') %}
Environment="KUBELET_NETWORK_PLUGIN=--network_plugin={{ overlay_network_plugin }}"
{% endif %}

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: kubelet-to-{{ cluster_name }}
preferences: {}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
name: {{ cluster_name }}
contexts:
- context:
cluster: {{ cluster_name }}
user: kubelet
name: kubelet-to-{{ cluster_name }}
users:
- name: kubelet
user:
token: {{ kubelet_token }}

View file

@ -0,0 +1,6 @@
###
# kubernetes proxy config
# default config should be adequate
[Service]
Environment="KUBE_PROXY_ARGS=--kubeconfig={{ kube_config_dir }}/proxy.kubeconfig"

View file

@ -0,0 +1,18 @@
apiVersion: v1
kind: Config
current-context: proxy-to-{{ cluster_name }}
preferences: {}
contexts:
- context:
cluster: {{ cluster_name }}
user: proxy
name: proxy-to-{{ cluster_name }}
clusters:
- cluster:
certificate-authority: {{ kube_cert_dir }}/ca.crt
server: https://{{ groups['kube-master'][0] }}:{{ kube_master_port }}
name: {{ cluster_name }}
users:
- name: proxy
user:
token: {{ proxy_token }}

View file

@ -0,0 +1,21 @@
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}
[Service]
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kube-proxy \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBE_MASTER \
$KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,26 @@
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
{% if overlay_network_plugin|default('') %}
After=docker.service calico-node.service
{% else %}
After=docker.service
{% endif %}
[Service]
#WorkingDirectory=/var/lib/kubelet
EnvironmentFile=/etc/network-environment
ExecStart={{ bin_dir }}/kubelet \
$KUBE_LOGTOSTDERR \
$KUBE_LOG_LEVEL \
$KUBELET_API_SERVER \
$KUBELET_ADDRESS \
$KUBELET_PORT \
$KUBELET_HOSTNAME \
$KUBE_ALLOW_PRIV \
$KUBELET_ARGS \
$KUBELET_NETWORK_PLUGIN
Restart=on-failure
[Install]
WantedBy=multi-user.target

View file

@ -0,0 +1,28 @@
---
- name: restart calico-node
service: name=calico-node state=restarted
- name: restart docker
service: name=docker state=restarted
- name: restart flannel
service: name=flannel state=restarted
notify:
- reload systemd
- stop docker
- delete docker0
- start docker
when: inventory_hostname in groups['kube-node']
- name: stop docker
service: name=docker state=stopped
- name: delete docker0
command: ip link delete docker0
ignore_errors: yes
- name: start docker
service: name=docker state=started
- name: reload systemd
shell: systemctl daemon-reload
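
The flannel handler chain above amounts to this manual sequence on a node, shown here as a plain sketch of what the handlers do:

systemctl daemon-reload
systemctl stop docker
ip link delete docker0 || true   # the handler ignores errors here as well
systemctl start docker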

Some files were not shown because too many files have changed in this diff.