OCI Cloud Provider Update (#4186)

* OCI subnet AD 2 is not required for CCM >= 0.7.0

Reorganize OCI provider to generate configuration, rather than pull

Add pull secret option to OCI cloud provider

* Updated oci example to document new parameters
This commit is contained in:
Jeff Bornemann 2019-02-11 15:08:53 -05:00 committed by Kubernetes Prow Robot
parent befa8a6cbd
commit c41c1e771f
10 changed files with 241 additions and 61 deletions

View file

@ -23,3 +23,6 @@
#rate_limit_qps_write:
#rate_limit_bucket_read:
#rate_limit_bucket_write:
# Other optional variables
#oci_cloud_controller_pull_source: (default iad.ocir.io/oracle/cloud-provider-oci)
#oci_cloud_controller_pull_secret: (name of pull secret to use if you define your own mirror above)

View file

@ -2,4 +2,5 @@
oci_security_list_management: All
oci_use_instance_principals: false
oci_cloud_controller_version: 0.6.0
oci_cloud_controller_version: 0.7.0
oci_cloud_controller_pull_source: iad.ocir.io/oracle/cloud-provider-oci

View file

@ -48,9 +48,11 @@
- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
fail:
msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
when: oci_subnet2_id is not defined or oci_subnet2_id == ""
when:
- oci_cloud_controller_version | version_compare('0.7.0', '<')
- oci_subnet2_id is not defined or oci_subnet2_id == ""
- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
fail:
msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
when: oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]
when: oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]

View file

@ -3,51 +3,35 @@
- include: credentials-check.yml
tags: oci
- name: "OCI Cloud Controller | Generate Configuration"
- name: "OCI Cloud Controller | Generate Cloud Provider Configuration"
template:
src: controller-manager-config.yml.j2
dest: /tmp/controller-manager-config.yml
register: controller_manager_config
dest: "{{ kube_config_dir }}/controller-manager-config.yml"
when: inventory_hostname == groups['kube-master'][0]
tags: oci
- name: "OCI Cloud Controller | Slurp Configuration"
slurp:
src: "{{ kube_config_dir }}/controller-manager-config.yml"
register: controller_manager_config
- name: "OCI Cloud Controller | Encode Configuration"
set_fact:
controller_manager_config_base64: "{{ lookup('file', '/tmp/controller-manager-config.yml') | b64encode }}"
controller_manager_config_base64: "{{ controller_manager_config.content }}"
when: inventory_hostname == groups['kube-master'][0]
tags: oci
- name: "OCI Cloud Controller | Apply Configuration To Secret"
- name: "OCI Cloud Controller | Generate Manifests"
template:
src: cloud-provider.yml.j2
dest: /tmp/cloud-provider.yml
src: oci-cloud-provider.yml.j2
dest: "{{ kube_config_dir }}/oci-cloud-provider.yml"
when: inventory_hostname == groups['kube-master'][0]
tags: oci
- name: "OCI Cloud Controller | Apply Configuration"
- name: "OCI Cloud Controller | Apply Manifests"
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "/tmp/cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube-master'][0]
tags: oci
- name: "OCI Cloud Controller | Download Controller Manifest"
get_url:
url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{oci_cloud_controller_version}}/manifests/oci-cloud-controller-manager.yaml"
dest: "/tmp/oci-cloud-controller-manager.yml"
force: yes
register: result
until: "'OK' in result.msg"
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
when: inventory_hostname == groups['kube-master'][0]
tags: oci
- name: "OCI Cloud Controller | Apply Controller Manifest"
kube:
kubectl: "{{ bin_dir }}/kubectl"
filename: "/tmp/oci-cloud-controller-manager.yml"
filename: "{{ kube_config_dir }}/oci-cloud-provider.yml"
state: latest
when: inventory_hostname == groups['kube-master'][0]
tags: oci

View file

@ -1,8 +0,0 @@
apiVersion: v1
data:
cloud-provider.yaml: {{ controller_manager_config_base64 }}
kind: Secret
metadata:
name: oci-cloud-controller-manager
namespace: kube-system
type: Opaque

View file

@ -1,4 +1,4 @@
auth:
{% macro private_key() %}{{ oci_private_key }}{% endmacro %}
{% if oci_use_instance_principals %}
# (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
@ -6,6 +6,15 @@ auth:
# allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
# allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
{% endif %}
auth:
{% if oci_use_instance_principals %}
# This key is put here too for backwards compatibility
useInstancePrincipals: true
{% else %}
useInstancePrincipals: false
@ -34,11 +43,11 @@ loadBalancer:
# subnet1 configures one of two subnets to which load balancers will be added.
# OCI load balancers require two subnets to ensure high availability.
subnet1: {{ oci_subnet1_id }}
{% if oci_subnet2_id is defined %}
# subnet2 configures the second of two subnets to which load balancers will be
# added. OCI load balancers require two subnets to ensure high availability.
subnet2: {{ oci_subnet2_id }}
{% endif %}
# SecurityListManagementMode configures how security lists are managed by the CCM.
# "All" (default): Manage all required security list rules for load balancer services.
# "Frontend": Manage only security list rules for ingress to the load

View file

@ -0,0 +1,69 @@
# OCI cloud-controller-manager manifests (Secret + DaemonSet).
# NOTE(review): the scraped diff had lost all YAML indentation; structure
# reconstructed from the Kubernetes Secret/DaemonSet schemas. Templated
# scalar values are quoted so the rendered output stays valid YAML even
# if a variable expands to something containing YAML specials.
apiVersion: v1
data:
  # Base64-encoded cloud-provider.yaml produced by the slurp/encode tasks.
  cloud-provider.yaml: "{{ controller_manager_config_base64 }}"
kind: Secret
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
type: Opaque
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
  labels:
    k8s-app: oci-cloud-controller-manager
spec:
  selector:
    matchLabels:
      component: oci-cloud-controller-manager
      tier: control-plane
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        component: oci-cloud-controller-manager
        tier: control-plane
    spec:
{% if oci_cloud_controller_pull_secret is defined %}
      # Only needed when pulling from a private mirror of the CCM image.
      imagePullSecrets:
        - name: "{{ oci_cloud_controller_pull_secret }}"
{% endif %}
      serviceAccountName: cloud-controller-manager
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        # Run before the cloud provider has initialized the nodes.
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      volumes:
        - name: cfg
          secret:
            secretName: oci-cloud-controller-manager
        - name: kubernetes
          hostPath:
            path: /etc/kubernetes
      containers:
        - name: oci-cloud-controller-manager
          # Quoted: the rendered value contains ':' between repo and tag.
          image: "{{ oci_cloud_controller_pull_source }}:{{ oci_cloud_controller_version }}"
          command: ["/usr/local/bin/oci-cloud-controller-manager"]
          args:
            - --cloud-config=/etc/oci/cloud-provider.yaml
            - --cloud-provider=oci
            - --leader-elect-resource-lock=configmaps
            - -v=2
          volumeMounts:
            - name: cfg
              mountPath: /etc/oci
              readOnly: true
            - name: kubernetes
              mountPath: /etc/kubernetes
              readOnly: true

View file

@ -1,2 +1 @@
---
oci_cloud_controller_version: 0.5.0

View file

@ -0,0 +1,126 @@
---
# RBAC for the OCI cloud-controller-manager.
# NOTE(review): the scraped diff had lost all YAML indentation; structure
# reconstructed from the ServiceAccount/ClusterRole/ClusterRoleBinding schemas.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: system:cloud-controller-manager
  labels:
    kubernetes.io/cluster-service: "true"
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - '*'
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - list
      - watch
      - patch
  - apiGroups:
      - ""
    resources:
      - services/status
    verbs:
      - update
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
      - update
  # For leader election
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    resourceNames:
      - "cloud-controller-manager"
    verbs:
      - get
      - list
      - watch
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      - "cloud-controller-manager"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - serviceaccounts
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - get
      - list
  # For the PVL
  - apiGroups:
      - ""
    resources:
      - persistentvolumes
    verbs:
      - list
      - watch
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: oci-cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:cloud-controller-manager
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system

View file

@ -1,23 +1,18 @@
---
- name: Get OCI ClusterRole, and ClusterRoleBinding
get_url:
url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{oci_cloud_controller_version}}/manifests/oci-cloud-controller-manager-rbac.yaml"
dest: "/tmp/oci-cloud-controller-manager-rbac.yaml"
force: yes
register: result
until: "'OK' in result.msg"
retries: 4
delay: "{{ retry_stagger | random + 3 }}"
- name: Copy OCI RBAC Manifest
copy:
src: "oci-rbac.yml"
dest: "{{ kube_config_dir }}/oci-rbac.yml"
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- name: Apply OCI ClusterRole, and ClusterRoleBinding
- name: Apply OCI RBAC
kube:
kubectl: "{{bin_dir}}/kubectl"
filename: "/tmp/oci-cloud-controller-manager-rbac.yaml"
filename: "{{ kube_config_dir }}/oci-rbac.yml"
when:
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]
- cloud_provider is defined
- cloud_provider == 'oci'
- inventory_hostname == groups['kube-master'][0]