Upgrade Local Volume Provisioner Addon to v2.0.0

Wong Hoi Sing Edison 2018-02-15 09:55:43 +08:00
parent 2de6da25a8
commit deef47c923
21 changed files with 170 additions and 139 deletions

View file

@ -94,6 +94,7 @@
    - { role: kubespray-defaults}
    - { role: kubernetes-apps/network_plugin, tags: network }
    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
    - { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
- hosts: calico-rr
  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"

View file

@ -169,9 +169,11 @@ istio_enabled: false
registry_enabled: false
# Local volume provisioner deployment
# Deprecated, will be removed
local_volumes_enabled: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled }}"
local_volume_provisioner_enabled: false
# local_volume_provisioner_namespace: "{{ system_namespace }}"
# local_volume_provisioner_base_dir: /mnt/disks
# local_volume_provisioner_mount_dir: /local-disks
# local_volume_provisioner_storage_class: local-storage
# CephFS provisioner deployment
cephfs_provisioner_enabled: false

View file

@ -1,58 +1,62 @@
# Local Storage Provisioner
Local Storage Provisioner
=========================
The local storage provisioner is NOT a dynamic storage provisioner as you would
expect from a cloud provider. Instead, it simply creates PersistentVolumes for
all manually created volumes located in the directory `local_volume_base_dir`.
all manually created volumes located in the directory `local_volume_provisioner_base_dir`.
The default path is /mnt/disks and the rest of this doc will use that path as
an example.
## Examples to create local storage volumes
Examples to create local storage volumes
----------------------------------------
### tmpfs method:
```
for vol in vol1 vol2 vol3; do
mkdir /mnt/disks/$vol
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
done
```
``` bash
for vol in vol1 vol2 vol3; do
mkdir /mnt/disks/$vol
mount -t tmpfs -o size=5G $vol /mnt/disks/$vol
done
```
The tmpfs method is not recommended for production because the mount is not
persistent and data will be deleted on reboot.
### Mount physical disks
```
mkdir /mnt/disks/ssd1
mount /dev/vdb1 /mnt/disks/ssd1
```
``` bash
mkdir /mnt/disks/ssd1
mount /dev/vdb1 /mnt/disks/ssd1
```
Physical disks are recommended for production environments because they offer
complete isolation in terms of I/O and capacity.
### File-backed sparsefile method
```
truncate /mnt/disks/disk5 --size 2G
mkfs.ext4 /mnt/disks/disk5
mkdir /mnt/disks/vol5
mount /mnt/disks/disk5 /mnt/disks/vol5
```
``` bash
truncate /mnt/disks/disk5 --size 2G
mkfs.ext4 /mnt/disks/disk5
mkdir /mnt/disks/vol5
mount /mnt/disks/disk5 /mnt/disks/vol5
```
If you have a development environment with only one disk, this is the best way
to limit the size of each persistent volume.
### Simple directories
```
for vol in vol6 vol7 vol8; do
mkdir /mnt/disks/$vol
done
```
``` bash
for vol in vol6 vol7 vol8; do
mkdir /mnt/disks/$vol
done
```
This is also acceptable in a development environment, but there is no capacity
management.
## Usage notes
Usage notes
-----------
The volume provisioner cannot calculate volume sizes correctly, so you should
delete the daemonset pod on the relevant host after creating volumes. The pod
@ -62,6 +66,7 @@ Make sure to make any mounts persist via /etc/fstab or with systemd mounts (for
CoreOS/Container Linux). Pods with persistent volume claims will not be
able to start if the mounts become unavailable.
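As an illustrative sketch only (not part of this role), the same persistence
can be expressed with Ansible's `mount` module, which mounts the device and
writes the corresponding /etc/fstab entry; the device and mount point below
are placeholders:
``` yaml
# Hypothetical task, not shipped with kubespray: mount a disk under the
# provisioner base dir and persist it across reboots via /etc/fstab.
- name: Mount ssd1 under /mnt/disks and persist it
  mount:
    path: /mnt/disks/ssd1   # must live under local_volume_provisioner_base_dir
    src: /dev/vdb1          # placeholder device from the example above
    fstype: ext4
    state: mounted          # mounts now and adds the /etc/fstab entry
```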
## Further reading
Further reading
---------------
Refer to the upstream docs here: https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume
Refer to the upstream docs here: <https://github.com/kubernetes-incubator/external-storage/tree/master/local-volume>
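For reference, a PersistentVolumeClaim that consumes one of the discovered
volumes could look like the sketch below; the claim name and requested size
are placeholders, and the storage class is assumed to be the default
`local-storage`:
``` yaml
# Hypothetical claim: binds to one of the PersistentVolumes created by the
# provisioner once a consuming pod is scheduled (WaitForFirstConsumer).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-local-claim        # placeholder name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage  # default local_volume_provisioner_storage_class
  resources:
    requests:
      storage: 5Gi                 # must fit within one of the discovered volumes
```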

View file

@ -0,0 +1,8 @@
---
local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
local_volume_provisioner_image_tag: v2.0.0
local_volume_provisioner_namespace: "{{ system_namespace }}"
local_volume_provisioner_base_dir: /mnt/disks
local_volume_provisioner_mount_dir: /mnt/disks
local_volume_provisioner_storage_class: local-storage

View file

@ -1,8 +1,9 @@
---
- name: Local Volume Provisioner | Ensure base dir is created on all hosts
  file:
    path: "{{ local_volume_base_dir }}"
    ensure: directory
    path: "{{ local_volume_provisioner_base_dir }}"
    state: directory
    owner: root
    group: root
    mode: 0700
@ -13,31 +14,32 @@
- name: Local Volume Provisioner | Create addon dir
  file:
    path: "{{ kube_config_dir }}/addons/local_volume_provisioner"
    state: directory
    owner: root
    group: root
    mode: 0755
    recurse: true
- name: Local Volume Provisioner | Create manifests
  template:
    src: "{{ item.file }}.j2"
    dest: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.file }}"
  with_items:
    - { name: local-volume-serviceaccount, file: serviceaccount.yml, type: serviceaccount }
    - { name: local-volume-clusterrolebinding, file: clusterrolebinding.yml, type: clusterrolebinding }
    - { name: local-volume-configmap, file: configmap.yml, type: configmap }
    - { name: local-volume-daemonset, file: daemonset.yml, type: daemonset }
  register: local_volume_manifests
    - { name: local-volume-provisioner-ns, file: local-volume-provisioner-ns.yml, type: ns }
    - { name: local-volume-provisioner-sa, file: local-volume-provisioner-sa.yml, type: sa }
    - { name: local-volume-provisioner-clusterrolebinding, file: local-volume-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
    - { name: local-volume-provisioner-cm, file: local-volume-provisioner-cm.yml, type: cm }
    - { name: local-volume-provisioner-ds, file: local-volume-provisioner-ds.yml, type: ds }
    - { name: local-volume-provisioner-sc, file: local-volume-provisioner-sc.yml, type: sc }
  register: local_volume_provisioner_manifests
  when: inventory_hostname == groups['kube-master'][0]
- name: Local Volume Provisioner | Apply manifests
  kube:
    name: "{{ item.item.name }}"
    namespace: "{{ system_namespace }}"
    namespace: "{{ local_volume_provisioner_namespace }}"
    kubectl: "{{ bin_dir }}/kubectl"
    resource: "{{ item.item.type }}"
    filename: "{{ kube_config_dir }}/addons/local_volume_provisioner/{{ item.item.file }}"
    state: "latest"
  with_items: "{{ local_volume_manifests.results }}"
  with_items: "{{ local_volume_provisioner_manifests.results }}"
  when: inventory_hostname == groups['kube-master'][0]

View file

@ -1,27 +1,28 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-pv-binding
  namespace: {{ system_namespace }}
  name: local-volume-provisioner-system-persistent-volume-provisioner
  namespace: {{ local_volume_provisioner_namespace }}
subjects:
  - kind: ServiceAccount
    name: local-storage-admin
    namespace: {{ system_namespace }}
    name: local-volume-provisioner
    namespace: {{ local_volume_provisioner_namespace }}
roleRef:
  kind: ClusterRole
  name: system:persistent-volume-provisioner
  apiGroup: rbac.authorization.k8s.io
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-storage-provisioner-node-binding
  namespace: {{ system_namespace }}
  name: local-volume-provisioner-system-node
  namespace: {{ local_volume_provisioner_namespace }}
subjects:
  - kind: ServiceAccount
    name: local-storage-admin
    namespace: {{ system_namespace }}
    name: local-volume-provisioner
    namespace: {{ local_volume_provisioner_namespace }}
roleRef:
  kind: ClusterRole
  name: system:node

View file

@ -0,0 +1,11 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-volume-provisioner
  namespace: {{ local_volume_provisioner_namespace }}
data:
  storageClassMap: |
    {{ local_volume_provisioner_storage_class }}:
      hostDir: {{ local_volume_provisioner_base_dir }}
      mountDir: {{ local_volume_provisioner_mount_dir }}
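With the role defaults above, this template renders to roughly the following
ConfigMap (namespace shown assuming `system_namespace` resolves to
`kube-system`):
``` yaml
# Illustrative rendering only, using the default variables from this role.
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-volume-provisioner
  namespace: kube-system
data:
  storageClassMap: |
    local-storage:
      hostDir: /mnt/disks
      mountDir: /mnt/disks
```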

View file

@ -0,0 +1,45 @@
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: local-volume-provisioner
  namespace: {{ local_volume_provisioner_namespace }}
  labels:
    k8s-app: local-volume-provisioner
    version: {{ local_volume_provisioner_image_tag }}
spec:
  selector:
    matchLabels:
      k8s-app: local-volume-provisioner
      version: {{ local_volume_provisioner_image_tag }}
  template:
    metadata:
      labels:
        k8s-app: local-volume-provisioner
        version: {{ local_volume_provisioner_image_tag }}
    spec:
      serviceAccountName: local-volume-provisioner
      containers:
        - name: provisioner
          image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          securityContext:
            privileged: true
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: local-volume-provisioner
              mountPath: /etc/provisioner/config
              readOnly: true
            - name: local-volume-provisioner-hostpath-mnt-disks
              mountPath: {{ local_volume_provisioner_mount_dir }}
      volumes:
        - name: local-volume-provisioner
          configMap:
            name: local-volume-provisioner
        - name: local-volume-provisioner-hostpath-mnt-disks
          hostPath:
            path: {{ local_volume_provisioner_base_dir }}

View file

@ -0,0 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
  name: {{ local_volume_provisioner_namespace }}

View file

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-volume-provisioner
  namespace: {{ local_volume_provisioner_namespace }}

View file

@ -0,0 +1,7 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: {{ local_volume_provisioner_storage_class }}
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer

View file

@ -0,0 +1,8 @@
---
dependencies:
  - role: kubernetes-apps/external_provisioner/local_volume_provisioner
    when: local_volume_provisioner_enabled
    tags:
      - apps
      - local-volume-provisioner
      - external-provisioner

View file

@ -1,6 +0,0 @@
---
local_volume_provisioner_bootstrap_image_repo: quay.io/external_storage/local-volume-provisioner-bootstrap
local_volume_provisioner_bootstrap_image_tag: v1.0.1
local_volume_provisioner_image_repo: quay.io/external_storage/local-volume-provisioner
local_volume_provisioner_image_tag: v1.0.1

View file

@ -1,14 +0,0 @@
---
# The config map is used to configure local volume discovery for Local SSDs on GCE and GKE.
# It is a map from storage class to its mount configuration.
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-volume-config
  namespace: {{ system_namespace }}
data:
  "{{ local_volume_storage_class }}": |
    {
      "hostDir": "{{ local_volume_base_dir }}",
      "mountDir": "{{ local_volume_mount_dir }}"
    }

View file

@ -1,45 +0,0 @@
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: local-volume-provisioner
  namespace: "{{ system_namespace }}"
spec:
  template:
    metadata:
      labels:
        app: local-volume-provisioner
    spec:
      containers:
        - name: provisioner
          image: {{ local_volume_provisioner_image_repo }}:{{ local_volume_provisioner_image_tag }}
          imagePullPolicy: {{ k8s_image_pull_policy }}
          securityContext:
            privileged: true
          volumeMounts:
            - name: discovery-vol
              mountPath: "{{ local_volume_mount_dir }}"
            - name: local-volume-config
              mountPath: /etc/provisioner/config/
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: MY_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: VOLUME_CONFIG_NAME
              value: "local-volume-config"
      volumes:
        - name: discovery-vol
          hostPath:
            path: "{{ local_volume_base_dir }}"
        - configMap:
            defaultMode: 420
            name: local-volume-config
          name: local-volume-config
      serviceAccount: local-storage-admin

View file

@ -1,5 +0,0 @@
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: local-storage-admin

View file

@ -27,13 +27,6 @@ dependencies:
      - apps
      - registry
  - role: kubernetes-apps/local_volume_provisioner
    when: local_volume_provisioner_enabled
    tags:
      - apps
      - local_volume_provisioner
      - storage
  - role: kubernetes-apps/cephfs_provisioner
    when: cephfs_provisioner_enabled
    tags:

View file

@ -27,7 +27,7 @@
-v {{kube_config_dir}}:{{kube_config_dir}}:ro \
-v /etc/os-release:/etc/os-release:ro \
{% if local_volume_provisioner_enabled == true %}
-v {{ local_volume_base_dir }}:{{ local_volume_base_dir }}:shared \
-v {{ local_volume_provisioner_base_dir }}:{{ local_volume_provisioner_base_dir }}:shared \
{% endif %}
{{ hyperkube_image_repo }}:{{ hyperkube_image_tag }} \
./hyperkube kubelet \

View file

@ -37,7 +37,7 @@ ExecStart=/usr/bin/rkt run \
--volume var-lib-kubelet,kind=host,source=/var/lib/kubelet,readOnly=false,recursive=true \
--volume var-log,kind=host,source=/var/log \
{% if local_volume_provisioner_enabled == true %}
--volume local-volume-base-dir,kind=host,source={{ local_volume_base_dir }},readOnly=false,recursive=true \
--volume local-volume-provisioner-base-dir,kind=host,source={{ local_volume_provisioner_base_dir }},readOnly=false,recursive=true \
{% endif %}
{% if kube_network_plugin in ["calico", "weave", "canal", "flannel", "contiv", "cilium"] %}
--volume etc-cni,kind=host,source=/etc/cni,readOnly=true \
@ -68,7 +68,7 @@ ExecStart=/usr/bin/rkt run \
--mount volume=var-log,target=/var/log \
--mount volume=hosts,target=/etc/hosts \
{% if local_volume_provisioner_enabled == true %}
--mount volume=local-volume-base-dir,target={{ local_volume_base_dir }} \
--mount volume=local-volume-provisioner-base-dir,target={{ local_volume_provisioner_base_dir }} \
{% endif %}
{% if kubelet_flexvolumes_plugins_dir is defined %}
--mount volume=flexvolumes,target={{ kubelet_flexvolumes_plugins_dir }} \

View file

@ -60,7 +60,15 @@
- "{{ kube_config_dir }}/ssl"
- "{{ kube_manifest_dir }}"
- "{{ kube_script_dir }}"
- "{{ local_volume_base_dir }}"
- name: Create local volume provisioner base dir
file:
path: "{{ local_volume_provisioner_base_dir }}"
state: directory
owner: kube
when:
- inventory_hostname in groups['k8s-cluster']
- local_volume_provisioner_enabled == true
- name: check cloud_provider value
fail:

View file

@ -166,15 +166,10 @@ helm_enabled: false
istio_enabled: false
registry_enabled: false
enable_network_policy: false
local_volume_provisioner_enabled: false
local_volume_provisioner_enabled: "{{ local_volumes_enabled | default('false') }}"
persistent_volumes_enabled: false
cephfs_provisioner_enabled: false
# Base path for local volume provisioner addon
local_volume_base_dir: /mnt/disks
local_volume_mount_dir: /local-disks
local_volume_storage_class: local-storage
## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461)
# openstack_blockstorage_version: "v1/v2/auto (default)"
## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables.
@ -199,7 +194,11 @@ rbac_enabled: "{{ 'RBAC' in authorization_modes or kubeadm_enabled }}"
## List of key=value pairs that describe feature gates for
## the k8s cluster.
kube_feature_gates: ['Initializers={{ istio_enabled|string }}', 'PersistentLocalVolumes={{ local_volume_provisioner_enabled|string }}']
kube_feature_gates:
- "Initializers={{ istio_enabled | string }}"
- "PersistentLocalVolumes={{ local_volume_provisioner_enabled | string }}"
- "VolumeScheduling={{ local_volume_provisioner_enabled | string }}"
- "MountPropagation={{ local_volume_provisioner_enabled | string }}"
# Vault data dirs.
vault_base_dir: /etc/vault