From a2fcf0be5d000e4bf97f3924ed8837da4acdc181 Mon Sep 17 00:00:00 2001
From: Spencer Smith
Date: Wed, 24 Aug 2016 09:48:32 -0400
Subject: [PATCH] updated to no longer handle gce as cloud-provider. provided aws setup doc

---
 docs/aws.md                                         | 10 ++++++++++
 .../templates/manifests/kube-apiserver.manifest.j2  |  2 +-
 .../manifests/kube-controller-manager.manifest.j2   |  2 +-
 roles/kubernetes/node/templates/kubelet.j2          |  2 +-
 4 files changed, 13 insertions(+), 3 deletions(-)
 create mode 100644 docs/aws.md

diff --git a/docs/aws.md b/docs/aws.md
new file mode 100644
index 000000000..e2bab6f15
--- /dev/null
+++ b/docs/aws.md
@@ -0,0 +1,10 @@
+AWS
+===============
+
+To deploy kubespray on [AWS](https://aws.amazon.com/), uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
+
+Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes/kubernetes/tree/master/cluster/aws/templates/iam). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that will only be used for etcd do not need a role.
+
+The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
+
+You can now create your cluster!
diff --git a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
index 1df850dbf..2f69666d8 100644
--- a/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-apiserver.manifest.j2
@@ -35,7 +35,7 @@ spec:
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
     - --cloud-provider={{ cloud_provider }}
     - --cloud-config={{ kube_config_dir }}/cloud_config
-{% elif cloud_provider is defined %}
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
     - --cloud-provider={{ cloud_provider }}
 {% endif %}
     - 2>&1 >> {{ kube_log_dir }}/kube-apiserver.log
diff --git a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2 b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
index 0195e5eab..dbce2bfbf 100644
--- a/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
+++ b/roles/kubernetes/master/templates/manifests/kube-controller-manager.manifest.j2
@@ -20,7 +20,7 @@ spec:
 {% if cloud_provider is defined and cloud_provider == "openstack"%}
     - --cloud-provider={{cloud_provider}}
     - --cloud-config={{ kube_config_dir }}/cloud_config
-{% elif cloud_provider is defined %}
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
     - --cloud-provider={{cloud_provider}}
 {% endif %}
     livenessProbe:
diff --git a/roles/kubernetes/node/templates/kubelet.j2 b/roles/kubernetes/node/templates/kubelet.j2
index 7eb76de2d..c7d20c420 100644
--- a/roles/kubernetes/node/templates/kubelet.j2
+++ b/roles/kubernetes/node/templates/kubelet.j2
@@ -33,7 +33,7 @@ DOCKER_SOCKET="--docker-endpoint=unix:/var/run/weave/weave.sock"
 KUBE_ALLOW_PRIV="--allow-privileged=true"
 {% if cloud_provider is defined and cloud_provider == "openstack" %}
 KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }} --cloud-config={{ kube_config_dir }}/cloud_config"
-{% elif cloud_provider is defined %}
+{% elif cloud_provider is defined and cloud_provider == "aws" %}
 KUBELET_CLOUDPROVIDER="--cloud-provider={{ cloud_provider }}"
 {% else %}
 KUBELET_CLOUDPROVIDER=""
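
For reference, the `cloud_provider` change described in the new doc amounts to a one-line edit in `group_vars/all.yml`. A minimal sketch, assuming the variable ships commented out in your copy of the file:

```yaml
# group_vars/all.yml (excerpt) -- uncomment the cloud_provider line and set it.
# With this patch, only "openstack" and "aws" are handled by the templates;
# "openstack" additionally expects a cloud_config file.
cloud_provider: 'aws'
```

With this set, the templates above render `--cloud-provider=aws` for the apiserver, controller-manager, and kubelet; any other value now falls through to no cloud-provider flags instead of being passed along as before.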
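The doc's requirement that inventory hostnames match the AWS internal hostnames, with `ansible_ssh_host` and `ansible_ssh_user` supplying the actual connection details, could look roughly like the sketch below. The group names, addresses, and SSH user are illustrative assumptions; follow the sample inventory in your checkout.

```ini
# Hypothetical inventory sketch: host entries use the AWS internal hostnames,
# while ansible_ssh_host / ansible_ssh_user tell Ansible how to reach them.
[kube-master]
ip-10-0-1-10.us-west-2.compute.internal ansible_ssh_host=203.0.113.10 ansible_ssh_user=ubuntu

[etcd]
ip-10-0-1-10.us-west-2.compute.internal

[kube-node]
ip-10-0-1-11.us-west-2.compute.internal ansible_ssh_host=203.0.113.11 ansible_ssh_user=ubuntu

[k8s-cluster:children]
kube-master
kube-node
```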