Merge pull request #2543 from hswong3i/cert-manager-0.2.3

Integrate jetstack/cert-manager 0.2.3 into Kubespray
Andreas Krüger 2018-03-31 18:15:25 +02:00 committed by GitHub
commit 5b0da4279f
19 changed files with 268 additions and 5 deletions

View file

@@ -207,6 +207,10 @@ ingress_nginx_enabled: false
# ingress_nginx_configmap_udp_services:
# 53: "kube-system/kube-dns:53"
# Cert manager deployment
cert_manager_enabled: false
# cert_manager_namespace: "cert-manager"
# Add Persistent Volumes Storage Class for corresponding cloud provider (only OpenStack is supported for now)
persistent_volumes_enabled: false
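
With the addon off by default, enabling it is a matter of flipping these variables in the cluster inventory group_vars. A minimal sketch; the namespace override is optional and the value shown is only a placeholder:

cert_manager_enabled: true
# Optional: deploy into a different namespace than the default "cert-manager" (placeholder value)
cert_manager_namespace: "ingress-tls"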

View file

@@ -124,7 +124,6 @@ fluentd_image_tag: "{{ fluentd_version }}"
kibana_version: "v4.6.1"
kibana_image_repo: "gcr.io/google_containers/kibana"
kibana_image_tag: "{{ kibana_version }}"
helm_version: "v2.8.1"
helm_image_repo: "lachlanevenson/k8s-helm"
helm_image_tag: "{{ helm_version }}"
@@ -132,6 +131,11 @@ tiller_image_repo: "gcr.io/kubernetes-helm/tiller"
tiller_image_tag: "{{ helm_version }}"
vault_image_repo: "vault"
vault_image_tag: "{{ vault_version }}"
cert_manager_version: "v0.2.3"
cert_manager_controller_image_repo: "quay.io/jetstack/cert-manager-controller"
cert_manager_controller_image_tag: "{{ cert_manager_version }}"
cert_manager_ingress_shim_image_repo: "quay.io/jetstack/cert-manager-ingress-shim"
cert_manager_ingress_shim_image_tag: "{{ cert_manager_version }}"
downloads:
  netcheck_server:
@@ -422,6 +426,22 @@ downloads:
    version: "{{ vault_version }}"
    groups:
      - vault
  cert_manager_controller:
    enabled: "{{ cert_manager_enabled }}"
    container: true
    repo: "{{ cert_manager_controller_image_repo }}"
    tag: "{{ cert_manager_controller_image_tag }}"
    sha256: "{{ cert_manager_controller_digest_checksum|default(None) }}"
    groups:
      - kube-node
  cert_manager_ingress_shim:
    enabled: "{{ cert_manager_enabled }}"
    container: true
    repo: "{{ cert_manager_ingress_shim_image_repo }}"
    tag: "{{ cert_manager_ingress_shim_image_tag }}"
    sha256: "{{ cert_manager_ingress_shim_digest_checksum|default(None) }}"
    groups:
      - kube-node

download_defaults:
  container: false
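
Because the image coordinates are plain variables, they can be overridden from the inventory like any other Kubespray download, for example to pull the cert-manager images through a private registry mirror. A hedged sketch; the registry host below is a placeholder, not anything defined in this PR:

cert_manager_controller_image_repo: "registry.example.com/jetstack/cert-manager-controller"
cert_manager_ingress_shim_image_repo: "registry.example.com/jetstack/cert-manager-ingress-shim"
# The tags keep following the pinned cert_manager_version unless overridden as well.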

View file

@@ -22,12 +22,12 @@ etcd_script_dir: "{{ bin_dir }}/etcd-scripts"
etcd_heartbeat_interval: "250"
etcd_election_timeout: "5000"
# etcd_snapshot_count: "10000"
# Parameters for ionice
# -c takes an integer between 0 and 3 or one of the strings none, realtime, best-effort or idle.
# -n takes an integer between 0 (highest priority) and 7 (lowest priority)
# etcd_ionice: "-c2 -n0"
etcd_metrics: "basic"

View file

@@ -0,0 +1,17 @@
Deployment files
================

This directory contains example deployment manifests for cert-manager that can
be used in place of the official Helm chart.

This is useful if you are deploying cert-manager into an environment without
Helm, or want to inspect a 'bare minimum' deployment.

Where do these come from?
-------------------------

The manifests in these subdirectories are generated from the Helm chart
automatically. The `values.yaml` files used to configure cert-manager can be
found in [`hack/deploy`](../../hack/deploy/).

They are automatically generated by running `./hack/update-deploy-gen.sh`.

View file

@@ -0,0 +1,6 @@
---
cert_manager_namespace: "cert-manager"
cert_manager_cpu_requests: 10m
cert_manager_cpu_limits: 30m
cert_manager_memory_requests: 32Mi
cert_manager_memory_limits: 200Mi
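
These role defaults can likewise be tuned per cluster by overriding the same variables in the inventory; a short sketch with purely illustrative sizing:

cert_manager_cpu_limits: 50m
cert_manager_memory_limits: 256Mi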

View file

@@ -0,0 +1,38 @@
---
- name: Cert Manager | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/cert_manager"
    state: directory
    owner: root
    group: root
    mode: 0755

- name: Cert Manager | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/cert_manager/{{ item.file }}"
  with_items:
    - { name: cert-manager-ns, file: cert-manager-ns.yml, type: ns }
    - { name: cert-manager-sa, file: cert-manager-sa.yml, type: sa }
    - { name: cert-manager-clusterrole, file: cert-manager-clusterrole.yml, type: clusterrole }
    - { name: cert-manager-clusterrolebinding, file: cert-manager-clusterrolebinding.yml, type: clusterrolebinding }
    - { name: cert-manager-issuer-crd, file: cert-manager-issuer-crd.yml, type: crd }
    - { name: cert-manager-clusterissuer-crd, file: cert-manager-clusterissuer-crd.yml, type: crd }
    - { name: cert-manager-certificate-crd, file: cert-manager-certificate-crd.yml, type: crd }
    - { name: cert-manager-deploy, file: cert-manager-deploy.yml, type: deploy }
  register: cert_manager_manifests
  when:
    - inventory_hostname == groups['kube-master'][0]

- name: Cert Manager | Apply manifests
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ cert_manager_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/addons/cert_manager/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ cert_manager_manifests.results }}"
  when:
    - inventory_hostname == groups['kube-master'][0]

View file

@@ -0,0 +1,21 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: certificates.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  scope: Namespaced
  names:
    kind: Certificate
    plural: certificates
    shortNames:
      - cert
      - certs
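
Once this CRD is registered, users can create Certificate objects against the certmanager.k8s.io/v1alpha1 API. The manifest below is an illustrative sketch based on upstream cert-manager 0.2.x documentation, not part of this PR; the names, namespace and referenced Issuer are placeholders:

apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
  name: example-com          # placeholder
  namespace: default
spec:
  secretName: example-com-tls   # TLS Secret that cert-manager will create and keep renewed
  issuerRef:
    name: ca-issuer             # assumes an Issuer with this name already exists
    kind: Issuer
  commonName: example.com
  dnsNames:
    - example.com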

View file

@@ -0,0 +1,17 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: clusterissuers.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  names:
    kind: ClusterIssuer
    plural: clusterissuers
  scope: Cluster

View file

@@ -0,0 +1,25 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: cert-manager
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
rules:
  - apiGroups: ["certmanager.k8s.io"]
    resources: ["certificates", "issuers", "clusterissuers"]
    verbs: ["*"]
  - apiGroups: [""]
    # TODO: remove endpoints once 0.4 is released. We include it here in case
    # users use the 'master' version of the Helm chart with a 0.2.x release of
    # cert-manager that still performs leader election with Endpoint resources.
    # We advise users not to do this, but some will anyway, and this will reduce
    # friction.
    resources: ["endpoints", "configmaps", "secrets", "events", "services", "pods"]
    verbs: ["*"]
  - apiGroups: ["extensions"]
    resources: ["ingresses"]
    verbs: ["*"]

View file

@@ -0,0 +1,18 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: cert-manager
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cert-manager
subjects:
  - name: cert-manager
    namespace: {{ cert_manager_namespace }}
    kind: ServiceAccount

View file

@@ -0,0 +1,51 @@
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: cert-manager
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: cert-manager
        release: cert-manager
      annotations:
    spec:
      serviceAccountName: cert-manager
      containers:
        - name: cert-manager
          image: {{ cert_manager_controller_image_repo }}:{{ cert_manager_controller_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          args:
            - --cluster-resource-namespace=$(POD_NAMESPACE)
          env:
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          resources:
            requests:
              cpu: {{ cert_manager_cpu_requests }}
              memory: {{ cert_manager_memory_requests }}
            limits:
              cpu: {{ cert_manager_cpu_limits }}
              memory: {{ cert_manager_memory_limits }}
        - name: ingress-shim
          image: {{ cert_manager_ingress_shim_image_repo }}:{{ cert_manager_ingress_shim_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          resources:
            requests:
              cpu: {{ cert_manager_cpu_requests }}
              memory: {{ cert_manager_memory_requests }}
            limits:
              cpu: {{ cert_manager_cpu_limits }}
              memory: {{ cert_manager_memory_limits }}

View file

@@ -0,0 +1,17 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: issuers.certmanager.k8s.io
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller
spec:
  group: certmanager.k8s.io
  version: v1alpha1
  names:
    kind: Issuer
    plural: issuers
  scope: Namespaced
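
For completeness, a namespaced Issuer of the CA type might look like the sketch below. The spec fields follow upstream cert-manager v1alpha1 documentation and are not defined anywhere in this PR; `ca-key-pair` is a placeholder Secret assumed to hold a CA certificate and key:

apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
  name: ca-issuer            # placeholder
  namespace: default
spec:
  ca:
    secretName: ca-key-pair  # assumed pre-existing CA Secret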

View file

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: {{ cert_manager_namespace }}
  labels:
    name: {{ cert_manager_namespace }}

View file

@@ -0,0 +1,11 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cert-manager
  namespace: {{ cert_manager_namespace }}
  labels:
    app: cert-manager
    chart: cert-manager-0.2.5
    release: cert-manager
    heritage: Tiller

View file

@@ -6,3 +6,10 @@ dependencies:
      - apps
      - ingress-nginx
      - ingress-controller

  - role: kubernetes-apps/ingress_controller/cert_manager
    when: cert_manager_enabled
    tags:
      - apps
      - cert-manager
      - ingress-controller

View file

@@ -96,4 +96,5 @@ volume_cross_zone_attachment: false
## Encrypting Secret Data at Rest
kube_encrypt_secret_data: false
kube_encrypt_token: "{{ lookup('password', inventory_dir + '/credentials/kube_encrypt_token.creds length=32 chars=ascii_letters,digits') }}"
# Must be either: aescbc, secretbox or aesgcm
kube_encryption_algorithm: "aescbc"

View file

@@ -174,6 +174,7 @@ local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
ingress_nginx_enabled: false
cert_manager_enabled: false
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"

View file

@@ -50,4 +50,4 @@ rbac_resources:
# * can-reach=DESTINATION
# * interface=INTERFACE-REGEX
# see https://docs.projectcalico.org/v3.0/reference/node/configuration#ip-autodetection-methods
# calico_ip_auto_method: "interface=eth.*"

View file

@@ -16,3 +16,5 @@ deploy_netchecker: true
kubedns_min_replicas: 1
cloud_provider: gce
kube_encrypt_secret_data: true
ingress_nginx_enabled: true
cert_manager_enabled: true