Merge pull request #3115 from oracle/oracle_oci_controller
Cloud provider support for OCI (Oracle Cloud Infrastructure)
Commit 4882531c29 · 17 changed files with 247 additions and 7 deletions
@@ -5,7 +5,7 @@ Deploy a Production Ready Kubernetes Cluster

If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.

-- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere, Oracle Cloud Infrastructure (Experimental), or Baremetal**
- **Highly available** cluster
- **Composable** (Choice of the network plugin for instance)
- Supports most popular **Linux distributions**
@@ -60,7 +60,7 @@ bin_dir: /usr/local/bin

## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
-## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', or 'external'
+## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using nova-client before starting the playbook.
#cloud_provider:
@@ -95,6 +95,22 @@ bin_dir: /usr/local/bin

#openstack_lbaas_monitor_timeout: "30s"
#openstack_lbaas_monitor_max_retries: "3"

+## When Oracle Cloud Infrastructure is used, set these variables
+#oci_private_key:
+#oci_region_id:
+#oci_tenancy_id:
+#oci_user_id:
+#oci_user_fingerprint:
+#oci_compartment_id:
+#oci_vnc_id:
+#oci_subnet1_id:
+#oci_subnet2_id:
+## Override these default behaviors if you wish
+#oci_security_list_management: All
+# If oci_use_instance_principals is true, you do not need to set the region, tenancy, user, key, passphrase, or fingerprint
+#oci_use_instance_principals: false
+#oci_cloud_controller_version: 0.5.0

## Uncomment to enable experimental kubeadm deployment mode
#kubeadm_enabled: false
## Set these proxy values in order to update package manager and docker daemon to use proxies
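For orientation (not part of this diff), a filled-in block in group_vars might look like the sketch below; every OCID, the fingerprint, and the key are hypothetical placeholders:

```yaml
## Hypothetical example values only -- substitute your own OCIDs and key.
cloud_provider: oci
oci_region_id: us-phoenix-1
oci_tenancy_id: ocid1.tenancy.oc1..<placeholder>
oci_user_id: ocid1.user.oc1..<placeholder>
oci_user_fingerprint: "aa:bb:cc:dd:ee:ff:00:11:22:33:44:55:66:77:88:99"
oci_compartment_id: ocid1.compartment.oc1..<placeholder>
oci_vnc_id: ocid1.vcn.oc1.phx.<placeholder>
oci_subnet1_id: ocid1.subnet.oc1.phx.<placeholder-a>
oci_subnet2_id: ocid1.subnet.oc1.phx.<placeholder-b>
oci_private_key: |
  -----BEGIN RSA PRIVATE KEY-----
  <placeholder>
  -----END RSA PRIVATE KEY-----
```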
@@ -0,0 +1,5 @@
---

oci_security_list_management: All
oci_use_instance_principals: false
oci_cloud_controller_version: 0.5.0
@@ -0,0 +1,56 @@
---

- name: "OCI Cloud Controller | Credentials Check | oci_private_key"
  fail:
    msg: "oci_private_key is missing"
  when: (oci_use_instance_principals == false) and
        (oci_private_key is not defined or oci_private_key == "")

- name: "OCI Cloud Controller | Credentials Check | oci_region_id"
  fail:
    msg: "oci_region_id is missing"
  when: (oci_use_instance_principals == false) and
        (oci_region_id is not defined or oci_region_id == "")

- name: "OCI Cloud Controller | Credentials Check | oci_tenancy_id"
  fail:
    msg: "oci_tenancy_id is missing"
  when: (oci_use_instance_principals == false) and
        (oci_tenancy_id is not defined or oci_tenancy_id == "")

- name: "OCI Cloud Controller | Credentials Check | oci_user_id"
  fail:
    msg: "oci_user_id is missing"
  when: (oci_use_instance_principals == false) and
        (oci_user_id is not defined or oci_user_id == "")

- name: "OCI Cloud Controller | Credentials Check | oci_user_fingerprint"
  fail:
    msg: "oci_user_fingerprint is missing"
  when: (oci_use_instance_principals == false) and
        (oci_user_fingerprint is not defined or oci_user_fingerprint == "")

- name: "OCI Cloud Controller | Credentials Check | oci_compartment_id"
  fail:
    msg: "oci_compartment_id is missing. This is the compartment in which the cluster resides"
  when: oci_compartment_id is not defined or oci_compartment_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_vnc_id"
  fail:
    msg: "oci_vnc_id is missing. This is the Virtual Cloud Network in which the cluster resides"
  when: oci_vnc_id is not defined or oci_vnc_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_subnet1_id"
  fail:
    msg: "oci_subnet1_id is missing. This is the first subnet to which loadbalancers will be added"
  when: oci_subnet1_id is not defined or oci_subnet1_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_subnet2_id"
  fail:
    msg: "oci_subnet2_id is missing. Two subnets are required for load balancer high availability"
  when: oci_subnet2_id is not defined or oci_subnet2_id == ""

- name: "OCI Cloud Controller | Credentials Check | oci_security_list_management"
  fail:
    msg: "oci_security_list_management is missing, or not defined correctly. Valid options are (All, Frontend, None)."
  when: oci_security_list_management is not defined or oci_security_list_management not in ["All", "Frontend", "None"]
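The unconditional checks above all follow one pattern; purely as an illustration (not in this PR), the same validation could be driven by a loop. This sketch assumes the variables are strings and relies on Ansible's `vars` lookup:

```yaml
# Hypothetical consolidation of the four unconditional checks above.
- name: "OCI Cloud Controller | Credentials Check | required vars"
  fail:
    msg: "{{ item }} is missing"
  when: vars[item] is not defined or vars[item] == ""
  with_items:
    - oci_compartment_id
    - oci_vnc_id
    - oci_subnet1_id
    - oci_subnet2_id
```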
roles/kubernetes-apps/cloud_controller/oci/tasks/main.yml (new file, 51 lines)
@@ -0,0 +1,51 @@
---

- include: credentials-check.yml
  tags: oci

- name: "OCI Cloud Controller | Generate Configuration"
  template:
    src: controller-manager-config.yml.j2
    dest: /tmp/controller-manager-config.yml
  register: controller_manager_config
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Encode Configuration"
  set_fact:
    controller_manager_config_base64: "{{ lookup('file', '/tmp/controller-manager-config.yml') | b64encode }}"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Configuration To Secret"
  template:
    src: cloud-provider.yml.j2
    dest: /tmp/cloud-provider.yml
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Configuration"
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "/tmp/cloud-provider.yml"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Download Controller Manifest"
  get_url:
    url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{ oci_cloud_controller_version }}/manifests/oci-cloud-controller-manager.yaml"
    dest: "/tmp/oci-cloud-controller-manager.yml"
    force: yes
  register: result
  until: "'OK' in result.msg"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci

- name: "OCI Cloud Controller | Apply Controller Manifest"
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "/tmp/oci-cloud-controller-manager.yml"
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci
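One could append a verification step along these lines (hypothetical, not in the PR); it assumes the upstream manifest creates a DaemonSet named `oci-cloud-controller-manager` in `kube-system`:

```yaml
# Hypothetical post-deploy check: poll until the controller DaemonSet
# reports at least one ready pod.
- name: "OCI Cloud Controller | Verify Controller Is Ready"
  command: >-
    {{ bin_dir }}/kubectl --namespace=kube-system
    get daemonset oci-cloud-controller-manager
    -o jsonpath={.status.numberReady}
  register: oci_ccm_ready
  until: oci_ccm_ready.stdout | int > 0
  retries: 10
  delay: 15
  changed_when: false
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci
```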
@@ -0,0 +1,8 @@
apiVersion: v1
data:
  cloud-provider.yaml: {{ controller_manager_config_base64 }}
kind: Secret
metadata:
  name: oci-cloud-controller-manager
  namespace: kube-system
type: Opaque
@@ -0,0 +1,56 @@
auth:

{% if oci_use_instance_principals %}
  # (https://docs.us-phoenix-1.oraclecloud.com/Content/Identity/Tasks/callingservicesfrominstances.htm).
  # Ensure you have set up the following OCI policies and your kubernetes nodes are running within them
  # allow dynamic-group [your dynamic group name] to read instance-family in compartment [your compartment name]
  # allow dynamic-group [your dynamic group name] to use virtual-network-family in compartment [your compartment name]
  # allow dynamic-group [your dynamic group name] to manage load-balancers in compartment [your compartment name]
  useInstancePrincipals: true
{% else %}
  useInstancePrincipals: false

  region: {{ oci_region_id }}
  tenancy: {{ oci_tenancy_id }}
  user: {{ oci_user_id }}
  key: |
    {{ oci_private_key }}

{% if oci_private_key_passphrase is defined %}
  passphrase: {{ oci_private_key_passphrase }}
{% endif %}

  fingerprint: {{ oci_user_fingerprint }}
{% endif %}

# compartment configures Compartment within which the cluster resides.
compartment: {{ oci_compartment_id }}

# vcn configures the Virtual Cloud Network (VCN) within which the cluster resides.
vcn: {{ oci_vnc_id }}

loadBalancer:
  # subnet1 configures one of two subnets to which load balancers will be added.
  # OCI load balancers require two subnets to ensure high availability.
  subnet1: {{ oci_subnet1_id }}

  # subnet2 configures the second of two subnets to which load balancers will be
  # added. OCI load balancers require two subnets to ensure high availability.
  subnet2: {{ oci_subnet2_id }}

  # SecurityListManagementMode configures how security lists are managed by the CCM.
  # "All" (default): Manage all required security list rules for load balancer services.
  # "Frontend": Manage only security list rules for ingress to the load
  #             balancer. Requires that the user has set up a rule that
  #             allows inbound traffic to the appropriate ports for kube
  #             proxy health port, node port ranges, and health check port ranges.
  #             E.g. 10.82.0.0/16 30000-32000.
  # "None": Disables all security list management. Requires that the
  #         user has set up a rule that allows inbound traffic to the
  #         appropriate ports for kube proxy health port, node port
  #         ranges, and health check port ranges. E.g. 10.82.0.0/16 30000-32000.
  #         Additionally requires the user to manage rules to allow
  #         inbound traffic to load balancers.
  securityListManagementMode: {{ oci_security_list_management }}
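For concreteness, here is roughly what the rendered configuration could look like with instance principals enabled (placeholder OCIDs, comments trimmed):

```yaml
# Hypothetical rendered output when oci_use_instance_principals is true.
auth:
  useInstancePrincipals: true
compartment: ocid1.compartment.oc1..<placeholder>
vcn: ocid1.vcn.oc1.phx.<placeholder>
loadBalancer:
  subnet1: ocid1.subnet.oc1.phx.<placeholder-a>
  subnet2: ocid1.subnet.oc1.phx.<placeholder-b>
  securityListManagementMode: All
```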
roles/kubernetes-apps/cluster_roles/defaults/main.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
---
oci_cloud_controller_version: 0.5.0
@@ -168,3 +168,9 @@

    - kube_version | version_compare('v1.9.3', '<=')
    - inventory_hostname == groups['kube-master'][0]
  tags: vsphere

+- include_tasks: oci.yml
+  tags: oci
+  when:
+    - cloud_provider is defined
+    - cloud_provider == 'oci'
roles/kubernetes-apps/cluster_roles/tasks/oci.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
---
- name: Get OCI ClusterRole and ClusterRoleBinding
  get_url:
    url: "https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/{{ oci_cloud_controller_version }}/manifests/oci-cloud-controller-manager-rbac.yaml"
    dest: "/tmp/oci-cloud-controller-manager-rbac.yaml"
    force: yes
  register: result
  until: "'OK' in result.msg"
  retries: 4
  delay: "{{ retry_stagger | random + 3 }}"
  when:
    - cloud_provider is defined
    - cloud_provider == 'oci'
    - inventory_hostname == groups['kube-master'][0]

- name: Apply OCI ClusterRole and ClusterRoleBinding
  kube:
    kubectl: "{{ bin_dir }}/kubectl"
    filename: "/tmp/oci-cloud-controller-manager-rbac.yaml"
  when:
    - cloud_provider is defined
    - cloud_provider == 'oci'
    - inventory_hostname == groups['kube-master'][0]
@@ -27,3 +27,8 @@ dependencies:

      tags:
        - apps
        - persistent_volumes

+  - role: kubernetes-apps/cloud_controller/oci
+    when: cloud_provider is defined and cloud_provider == "oci"
+    tags:
+      - oci
@@ -19,7 +19,7 @@ networking:

  serviceSubnet: {{ kube_service_addresses }}
  podSubnet: {{ kube_pods_subnet }}
kubernetesVersion: {{ kube_version }}
-{% if cloud_provider is defined and cloud_provider != "gce" %}
+{% if cloud_provider is defined and cloud_provider not in ["gce", "oci"] %}
cloudProvider: {{ cloud_provider }}
{% endif %}
{% if kube_proxy_mode == 'ipvs' and kube_version | version_compare('v1.10', '<') %}
@@ -48,6 +48,8 @@ spec:

    - --cloud-config={{ kube_config_dir }}/cloud_config
{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
    - --cloud-provider={{ cloud_provider }}
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+    - --cloud-provider=external
{% endif %}
{% if kube_network_plugin is defined and kube_network_plugin == 'cloud' %}
    - --configure-cloud-routes=true
@@ -93,8 +93,10 @@ KUBELET_NETWORK_PLUGIN="--hairpin-mode=promiscuous-bridge --network-plugin=kubenet"

KUBE_ALLOW_PRIV="--allow-privileged=true"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider == "aws" %}
+{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider=external"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
@@ -117,8 +117,10 @@ KUBELET_VOLUME_PLUGIN="--volume-plugin-dir={{ kubelet_flexvolumes_plugins_dir }}"

KUBE_ALLOW_PRIV="--allow-privileged=true"
{% if cloud_provider is defined and cloud_provider in ["openstack", "azure", "vsphere"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined and cloud_provider == "aws" %}
+{% elif cloud_provider is defined and cloud_provider in ["aws", "external"] %}
KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
+{% elif cloud_provider is defined and cloud_provider == "oci" %}
+KUBELET_CLOUDPROVIDER="--cloud-provider=external"
{% else %}
KUBELET_CLOUDPROVIDER=""
{% endif %}
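With `--cloud-provider=external`, kubelets register carrying the `node.cloudprovider.kubernetes.io/uninitialized` taint until the external controller initializes them. A rough, hypothetical spot check (not in the PR) for nodes stuck in that state:

```yaml
# Hypothetical debugging aid: count nodes still carrying the
# "uninitialized" taint the external cloud provider must clear.
- name: "OCI Cloud Controller | Count Uninitialized Nodes"
  shell: >-
    {{ bin_dir }}/kubectl get nodes -o json
    | grep -c 'node.cloudprovider.kubernetes.io/uninitialized' || true
  register: oci_uninitialized_nodes
  changed_when: false
  when: inventory_hostname == groups['kube-master'][0]
  tags: oci
```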
@@ -73,10 +73,10 @@

- name: check cloud_provider value
  fail:
-    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', or external"
+    msg: "If set the 'cloud_provider' var must be set either to 'generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or external"
  when:
    - cloud_provider is defined
-    - cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'external']
+    - cloud_provider not in ['generic', 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', 'external']
  tags:
    - cloud-provider
    - facts
@@ -92,6 +92,12 @@

  when: dashboard_enabled
  ignore_errors: "{{ ignore_assert_errors }}"

+- name: Stop if RBAC is not enabled when OCI cloud controller is enabled
+  assert:
+    that: rbac_enabled
+  when: cloud_provider is defined and cloud_provider == "oci"
+  ignore_errors: "{{ ignore_assert_errors }}"
+
- name: Stop if RBAC and anonymous-auth are not enabled when insecure port is disabled
  assert:
    that: rbac_enabled and kube_api_anonymous_auth