CephFS Provisioner Addon Fixup

This commit is contained in:
Wong Hoi Sing Edison 2018-02-13 09:55:59 +08:00
parent 4175431dcd
commit 206e24448b
14 changed files with 113 additions and 17 deletions

View file

@ -0,0 +1,78 @@
CephFS Volume Provisioner for Kubernetes 1.5+
=============================================
[![Docker Repository on Quay](https://quay.io/repository/external_storage/cephfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/external_storage/cephfs-provisioner)
Using Ceph volume client
Development
-----------
Compile the provisioner
``` console
make
```
Make the container image and push to the registry
``` console
make push
```
Test instruction
----------------
- Start Kubernetes local cluster
See <a href="https://kubernetes.io/" class="uri">https://kubernetes.io/</a>.
- Create a Ceph admin secret
``` bash
ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3}' |xargs echo -n > /tmp/secret
kubectl create ns cephfs
kubectl create secret generic ceph-secret-admin --from-file=/tmp/secret --namespace=cephfs
```
- Start CephFS provisioner
The following example uses `cephfs-provisioner-1` as the identity for the instance and assumes kubeconfig is at `/root/.kube`. The identity should remain the same if the provisioner restarts. If there are multiple provisioners, each should have a different identity.
``` bash
docker run -ti -v /root/.kube:/kube -v /var/run/kubernetes:/var/run/kubernetes --privileged --net=host cephfs-provisioner /usr/local/bin/cephfs-provisioner -master=http://127.0.0.1:8080 -kubeconfig=/kube/config -id=cephfs-provisioner-1
```
Alternatively, deploy it in kubernetes, see [deployment](deploy/README.md).
- Create a CephFS Storage Class
Replace the Ceph monitor's IP in <a href="example/class.yaml" class="uri">example/class.yaml</a> with your own and create the storage class:
``` bash
kubectl create -f example/class.yaml
```
- Create a claim
``` bash
kubectl create -f example/claim.yaml
```
- Create a Pod using the claim
``` bash
kubectl create -f example/test-pod.yaml
```
Known limitations
-----------------
- Kernel CephFS doesn't work with SELinux, setting SELinux label in Pod's securityContext will not work.
- Kernel CephFS doesn't support quota or capacity, capacity requested by PVC is not enforced or validated.
- Currently each Ceph user created by the provisioner has `allow r` MDS cap to permit CephFS mount.
Acknowledgement
---------------
Inspired by CephFS Manila provisioner and conversation with John Spray

View file

@ -3,22 +3,23 @@
- name: CephFS Provisioner | Create addon dir - name: CephFS Provisioner | Create addon dir
file: file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner" path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: directory
owner: root owner: root
group: root group: root
mode: 0755 mode: 0755
recurse: true
- name: CephFS Provisioner | Create manifests - name: CephFS Provisioner | Create manifests
template: template:
src: "{{ item.file }}.j2" src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}" dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items: with_items:
- { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
- { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa } - { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role } - { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding } - { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole } - { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding } - { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- { name: cephfs-provisioner-deploy, file: cephfs-provisioner-deploy.yml, type: deploy } - { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
- { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret } - { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc } - { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
register: cephfs_manifests register: cephfs_manifests
@ -27,7 +28,7 @@
- name: CephFS Provisioner | Apply manifests - name: CephFS Provisioner | Apply manifests
kube: kube:
name: "{{ item.item.name }}" name: "{{ item.item.name }}"
namespace: "{{ system_namespace }}" namespace: "{{ cephfs_provisioner_namespace }}"
kubectl: "{{ bin_dir }}/kubectl" kubectl: "{{ bin_dir }}/kubectl"
resource: "{{ item.item.type }}" resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}" filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"

View file

@ -3,7 +3,7 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: cephfs-provisioner name: cephfs-provisioner
namespace: {{ system_namespace }} namespace: {{ cephfs_provisioner_namespace }}
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["persistentvolumes"] resources: ["persistentvolumes"]

View file

@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: {{ cephfs_provisioner_namespace }}
labels:
name: {{ cephfs_provisioner_namespace }}

View file

@ -7,6 +7,7 @@ metadata:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: cephfs-provisioner name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: Role kind: Role

View file

@ -1,21 +1,28 @@
--- ---
apiVersion: extensions/v1beta1 apiVersion: apps/v1
kind: Deployment kind: ReplicaSet
metadata: metadata:
name: cephfs-provisioner name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
namespace: {{ cephfs_provisioner_namespace }} namespace: {{ cephfs_provisioner_namespace }}
labels:
k8s-app: cephfs-provisioner
version: v{{ cephfs_provisioner_image_tag }}
spec: spec:
replicas: 1 replicas: 1
strategy: selector:
type: Recreate matchLabels:
k8s-app: cephfs-provisioner
version: v{{ cephfs_provisioner_image_tag }}
template: template:
metadata: metadata:
labels: labels:
app: cephfs-provisioner k8s-app: cephfs-provisioner
version: v{{ cephfs_provisioner_image_tag }}
spec: spec:
containers: containers:
- name: cephfs-provisioner - name: cephfs-provisioner
image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }} image: {{ cephfs_provisioner_image_repo }}:{{ cephfs_provisioner_image_tag }}
imagePullPolicy: {{ k8s_image_pull_policy }}
env: env:
- name: PROVISIONER_NAME - name: PROVISIONER_NAME
value: ceph.com/cephfs value: ceph.com/cephfs
@ -23,4 +30,6 @@ spec:
- "/usr/local/bin/cephfs-provisioner" - "/usr/local/bin/cephfs-provisioner"
args: args:
- "-id=cephfs-provisioner-1" - "-id=cephfs-provisioner-1"
{% if rbac_enabled %}
serviceAccount: cephfs-provisioner serviceAccount: cephfs-provisioner
{% endif %}

View file

@ -6,3 +6,10 @@ dependencies:
- apps - apps
- local-volume-provisioner - local-volume-provisioner
- external-provisioner - external-provisioner
- role: kubernetes-apps/external_provisioner/cephfs_provisioner
when: cephfs_provisioner_enabled
tags:
- apps
- cephfs-provisioner
- external-provisioner

View file

@ -27,13 +27,6 @@ dependencies:
- apps - apps
- registry - registry
- role: kubernetes-apps/cephfs_provisioner
when: cephfs_provisioner_enabled
tags:
- apps
- cephfs_provisioner
- storage
# istio role should be last because it takes a long time to initialize and # istio role should be last because it takes a long time to initialize and
# will cause timeouts trying to start other addons. # will cause timeouts trying to start other addons.
- role: kubernetes-apps/istio - role: kubernetes-apps/istio