cephfs-provisioner: Upgrade to 06fddbe2

-   cephfs-provisioner 06fddbe2 (https://github.com/kubernetes-incubator/external-storage/tree/06fddbe2/ceph/cephfs)

Notable changes from upstream:

-   Added storage class parameters to specify a root path within the backing CephFS and, optionally, to use deterministic directory and user names (https://github.com/kubernetes-incubator/external-storage/pull/696); see the sketch after this list
-   Added support for capacity (https://github.com/kubernetes-incubator/external-storage/pull/770)
-   Enabled the metrics server (https://github.com/kubernetes-incubator/external-storage/pull/797)
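
For reference, a minimal StorageClass sketch wired up for the new root-path and deterministic-naming parameters; the monitor addresses and secret names below are illustrative placeholders, not values taken from this commit:

```yaml
# Illustrative only: StorageClass using the parameters added upstream.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: cephfs
provisioner: ceph.com/cephfs
parameters:
  monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
  adminId: admin
  adminSecretName: cephfs-provisioner
  adminSecretNamespace: cephfs-provisioner
  claimRoot: /volumes            # root path inside the backing CephFS
  deterministicNames: "true"     # predictable directory and user names
```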

Other notable changes:

-   Clean up legacy manifest file naming
-   Remove the legacy manifests, namespace and StorageClass before upgrade
-   `cephfs_provisioner_monitors` simplified to a comma-separated string (see the configuration sketch after this list)
-   Default to the new deterministic naming
-   Add `reclaimPolicy` support in the StorageClass
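
A hedged sketch of the resulting inventory group_vars (monitor addresses and the secret value are placeholders):

```yaml
# Example group_vars with the simplified variables after this change.
cephfs_provisioner_enabled: true
cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_reclaim_policy: Delete
cephfs_provisioner_deterministic_names: true
```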

With the legacy non-deterministic naming style (where each $UUID is generated randomly):

-   cephfs_provisioner_claim_root: /volumes/kubernetes
-   cephfs_provisioner_deterministic_names: false
-   Generated CephFS volume: /volumes/kubernetes/kubernetes-dynamic-pvc-$UUID
-   Generated CephFS user: kubernetes-dynamic-user-$UUID

With the new default deterministic naming style (where $NAMESPACE and $PVC come from the claim's namespace and name, and are therefore predictable):

-   cephfs_provisioner_claim_root: /volumes
-   cephfs_provisioner_deterministic_names: true
-   Generated CephFS volume: /volumes/$NAMESPACE/$PVC
-   Generated CephFS user: k8s.$NAMESPACE.$PVC
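
As a concrete sketch of the deterministic scheme (the namespace and claim names are made up for illustration):

```yaml
# A claim named "data" in namespace "web" bound through the "cephfs"
# StorageClass would map to the CephFS directory /volumes/web/data and
# to the Ceph user k8s.web.data.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: data
  namespace: web
spec:
  storageClassName: cephfs
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```
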
Wong Hoi Sing Edison 2018-07-01 13:14:07 +08:00
parent 62df6ac724
commit 728024e8ff
15 changed files with 65 additions and 29 deletions

@@ -102,8 +102,9 @@ Supported Components
- [flanneld](https://github.com/coreos/flannel) v0.10.0
- [weave](https://github.com/weaveworks/weave) v2.3.0
- Application
- [cephfs-provisioner](https://github.com/kubernetes-incubator/external-storage) 06fddbe2
- [cert-manager](https://github.com/jetstack/cert-manager) v0.3.0
- [ingress-nginx](https://github.com/kubernetes/ingress-nginx) v0.15.0
- [cert-manager](https://github.com/jetstack/cert-manager/releases) v0.3.0
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
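
As a hedged illustration of the pinning advice in the note above (the plugin and package names are assumptions about the target hosts, not part of this change):

```yaml
# Illustrative RHEL/CentOS sketch: lock the installed docker package so
# yum auto-updates cannot move it to a version kubelet does not support.
- name: Install yum versionlock plugin
  yum:
    name: yum-plugin-versionlock
    state: present

- name: Pin the currently installed docker version
  command: yum versionlock add docker-ce
```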

@@ -8,8 +8,8 @@
version: "{{ item.version }}"
state: "{{ item.state }}"
with_items:
- { state: "present", name: "docker", version: "3.2.1" }
- { state: "present", name: "docker-compose", version: "1.21.0" }
- { state: "present", name: "docker", version: "3.4.1" }
- { state: "present", name: "docker-compose", version: "1.21.2" }
- name: CephFS Provisioner | Check Go version
shell: |
@@ -35,19 +35,19 @@
- name: CephFS Provisioner | Clone repo
git:
repo: https://github.com/kubernetes-incubator/external-storage.git
dest: "~/go/src/github.com/kubernetes-incubator"
version: a71a49d4
clone: no
dest: "~/go/src/github.com/kubernetes-incubator/external-storage"
version: 06fddbe2
clone: yes
update: yes
- name: CephFS Provisioner | Build image
shell: |
cd ~/go/src/github.com/kubernetes-incubator/external-storage
REGISTRY=quay.io/kubespray/ VERSION=a71a49d4 make ceph/cephfs
REGISTRY=quay.io/kubespray/ VERSION=06fddbe2 make ceph/cephfs
- name: CephFS Provisioner | Push image
docker_image:
name: quay.io/kubespray/cephfs-provisioner:a71a49d4
name: quay.io/kubespray/cephfs-provisioner:06fddbe2
push: yes
retries: 10

@@ -197,13 +197,13 @@ local_volume_provisioner_enabled: false
cephfs_provisioner_enabled: false
# cephfs_provisioner_namespace: "cephfs-provisioner"
# cephfs_provisioner_cluster: ceph
# cephfs_provisioner_monitors:
# - 172.24.0.1:6789
# - 172.24.0.2:6789
# - 172.24.0.3:6789
# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789"
# cephfs_provisioner_admin_id: admin
# cephfs_provisioner_secret: secret
# cephfs_provisioner_storage_class: cephfs
# cephfs_provisioner_reclaim_policy: Delete
# cephfs_provisioner_claim_root: /volumes
# cephfs_provisioner_deterministic_names: true
# Nginx ingress controller deployment
ingress_nginx_enabled: false

@@ -155,7 +155,7 @@ registry_proxy_image_tag: "0.4"
local_volume_provisioner_image_repo: "quay.io/external_storage/local-volume-provisioner"
local_volume_provisioner_image_tag: "v2.0.0"
cephfs_provisioner_image_repo: "quay.io/kubespray/cephfs-provisioner"
cephfs_provisioner_image_tag: "a71a49d4"
cephfs_provisioner_image_tag: "06fddbe2"
ingress_nginx_controller_image_repo: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller"
ingress_nginx_controller_image_tag: "0.15.0"
ingress_nginx_default_backend_image_repo: "gcr.io/google_containers/defaultbackend"

@@ -1,7 +1,10 @@
---
cephfs_provisioner_namespace: "cephfs-provisioner"
cephfs_provisioner_cluster: ceph
cephfs_provisioner_monitors: []
cephfs_provisioner_monitors: ~
cephfs_provisioner_admin_id: admin
cephfs_provisioner_secret: secret
cephfs_provisioner_storage_class: cephfs
cephfs_provisioner_reclaim_policy: Delete
cephfs_provisioner_claim_root: /volumes
cephfs_provisioner_deterministic_names: true

@@ -1,5 +1,32 @@
---
- name: CephFS Provisioner | Remove legacy addon dir and manifests
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
state: absent
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: yes
when:
- inventory_hostname == groups['kube-master'][0]
tags:
- upgrade
- name: CephFS Provisioner | Create addon dir
file:
path: "{{ kube_config_dir }}/addons/cephfs_provisioner"
@@ -7,22 +34,24 @@
owner: root
group: root
mode: 0755
when:
- inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Create manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.file }}"
with_items:
- { name: cephfs-provisioner-ns, file: cephfs-provisioner-ns.yml, type: ns }
- { name: cephfs-provisioner-sa, file: cephfs-provisioner-sa.yml, type: sa }
- { name: cephfs-provisioner-role, file: cephfs-provisioner-role.yml, type: role }
- { name: cephfs-provisioner-rolebinding, file: cephfs-provisioner-rolebinding.yml, type: rolebinding }
- { name: cephfs-provisioner-clusterrole, file: cephfs-provisioner-clusterrole.yml, type: clusterrole }
- { name: cephfs-provisioner-clusterrolebinding, file: cephfs-provisioner-clusterrolebinding.yml, type: clusterrolebinding }
- { name: cephfs-provisioner-rs, file: cephfs-provisioner-rs.yml, type: rs }
- { name: cephfs-provisioner-secret, file: cephfs-provisioner-secret.yml, type: secret }
- { name: cephfs-provisioner-sc, file: cephfs-provisioner-sc.yml, type: sc }
register: cephfs_manifests
- { name: 00-namespace, file: 00-namespace.yml, type: ns }
- { name: secret-cephfs-provisioner, file: secret-cephfs-provisioner.yml, type: secret }
- { name: sa-cephfs-provisioner, file: sa-cephfs-provisioner.yml, type: sa }
- { name: clusterrole-cephfs-provisioner, file: clusterrole-cephfs-provisioner.yml, type: clusterrole }
- { name: clusterrolebinding-cephfs-provisioner, file: clusterrolebinding-cephfs-provisioner.yml, type: clusterrolebinding }
- { name: role-cephfs-provisioner, file: role-cephfs-provisioner.yml, type: role }
- { name: rolebinding-cephfs-provisioner, file: rolebinding-cephfs-provisioner.yml, type: rolebinding }
- { name: deploy-cephfs-provisioner, file: deploy-cephfs-provisioner.yml, type: rs }
- { name: sc-cephfs-provisioner, file: sc-cephfs-provisioner.yml, type: sc }
register: cephfs_provisioner_manifests
when: inventory_hostname == groups['kube-master'][0]
- name: CephFS Provisioner | Apply manifests
@@ -33,5 +62,5 @@
resource: "{{ item.item.type }}"
filename: "{{ kube_config_dir }}/addons/cephfs_provisioner/{{ item.item.file }}"
state: "latest"
with_items: "{{ cephfs_manifests.results }}"
with_items: "{{ cephfs_provisioner_manifests.results }}"
when: inventory_hostname == groups['kube-master'][0]
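
A possible follow-up check, sketched here rather than taken from this commit (the app=cephfs-provisioner label is an assumption about the deployment template):

```yaml
# Illustrative verification task: wait until the provisioner pod reports
# Running on the first master before relying on the storage class.
- name: CephFS Provisioner | Wait for provisioner pod
  shell: |
    {{ bin_dir }}/kubectl -n {{ cephfs_provisioner_namespace }} get pods -l app=cephfs-provisioner
  register: cephfs_provisioner_pods
  until: "'Running' in cephfs_provisioner_pods.stdout"
  retries: 10
  delay: 10
  when: inventory_hostname == groups['kube-master'][0]
```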

@@ -1,6 +1,6 @@
---
apiVersion: apps/v1
kind: ReplicaSet
kind: Deployment
metadata:
name: cephfs-provisioner-v{{ cephfs_provisioner_image_tag }}
namespace: {{ cephfs_provisioner_namespace }}

@@ -4,9 +4,12 @@ kind: StorageClass
metadata:
name: {{ cephfs_provisioner_storage_class }}
provisioner: ceph.com/cephfs
reclaimPolicy: {{ cephfs_provisioner_reclaim_policy }}
parameters:
cluster: {{ cephfs_provisioner_cluster }}
monitors: {{ cephfs_provisioner_monitors | join(',') }}
monitors: {{ cephfs_provisioner_monitors }}
adminId: {{ cephfs_provisioner_admin_id }}
adminSecretName: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
adminSecretName: cephfs-provisioner
adminSecretNamespace: {{ cephfs_provisioner_namespace }}
claimRoot: {{ cephfs_provisioner_claim_root }}
deterministicNames: "{{ cephfs_provisioner_deterministic_names | bool | lower }}"

@@ -2,7 +2,7 @@
kind: Secret
apiVersion: v1
metadata:
name: cephfs-provisioner-{{ cephfs_provisioner_admin_id }}-secret
name: cephfs-provisioner
namespace: {{ cephfs_provisioner_namespace }}
type: Opaque
data: