Fixes for AWS Terraform Deployment and Updated Readme
parent 9b3aa3451e
commit c48ffa24be
6 changed files with 41 additions and 9 deletions

@@ -14,20 +14,42 @@ This project will create:

**How to Use:**

-- Export the variables for your AWS credentials or edit credentials.tfvars:
+- Export the variables for your AWS credentials or edit `credentials.tfvars`:

```
-export aws_access_key="xxx"
-export aws_secret_key="yyy"
-export aws_ssh_key_name="zzz"
+export AWS_ACCESS_KEY_ID="www"
+export AWS_SECRET_ACCESS_KEY="xxx"
+export AWS_SSH_KEY_NAME="yyy"
+export AWS_DEFAULT_REGION="zzz"
```
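
Assuming the AWS CLI is installed, a quick sanity check that the exported credentials actually resolve to a valid identity (shown as a convenience, not part of this commit):

```
aws sts get-caller-identity   # prints the account and ARN Terraform will use
```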

+- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`

-- Update contrib/terraform/aws/terraform.tfvars with your data
+- Update `contrib/terraform/aws/terraform.tfvars` with your data

+- Allocate new AWS Elastic IPs: the count depends on the number of Availability Zones used (2 per AZ)

+- Create an AWS EC2 SSH Key

-- Run with `terraform apply -var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
+- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply`, depending on whether you exported your AWS credentials

+- Terraform automatically creates an Ansible inventory file called `hosts`, describing the created infrastructure, in the directory `inventory`

- Once the infrastructure is created, you can run the kargo playbooks and supply `inventory/hosts` with the `-i` flag, as sketched below.
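
A minimal invocation, assuming kargo's main playbook is `cluster.yml` in the repository root (adjust the path and privilege escalation to your setup):

```
ansible-playbook -i inventory/hosts -b cluster.yml   # -b escalates to root on the nodes
```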

+**Troubleshooting**

+***Remaining AWS IAM Instance Profile***:

+If the cluster was destroyed without using Terraform, it is possible that
+the AWS IAM Instance Profiles still remain. To delete them you can use
+the `AWS CLI` with the following command:

+```
+aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
+```
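
To find leftover profiles in the first place, the AWS CLI also provides a list command (again a convenience, not part of this commit):

```
aws iam list-instance-profiles   # look for profiles created by this project
```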

+***Ansible Inventory doesn't get created:***

+It could happen that Terraform doesn't create an Ansible Inventory file automatically. If this is the case, copy the output after `inventory=`, create a file named `hosts` in the directory `inventory`, and paste the inventory into the file.
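
Since this commit also adds an `inventory` Terraform output (see the outputs hunk below), one possible shortcut is to render it straight to the expected file:

```
terraform output inventory > inventory/hosts
```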

**Architecture**

Pictured is an AWS infrastructure created with this Terraform project, distributed over two Availability Zones.

@@ -173,6 +173,7 @@ data "template_file" "inventory" {
    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
    elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
+    kube_insecure_apiserver_address = "kube_apiserver_insecure_bind_address: ${var.kube_insecure_apiserver_address}"

  }
}

@@ -18,3 +18,7 @@ output "etcd" {
output "aws_elb_api_fqdn" {
|
output "aws_elb_api_fqdn" {
|
||||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "inventory" {
|
||||||
|
value = "${data.template_file.inventory.rendered}"
|
||||||
|
}
|
||||||
|
|
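
After `terraform apply`, the rendered values can be read back, for example (endpoint shown with the default port from `terraform.tfvars`, FQDN placeholder assumed):

```
terraform output aws_elb_api_fqdn   # e.g. <elb_fqdn>:6443
terraform output inventory          # the full rendered Ansible inventory
```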

@@ -25,3 +25,4 @@ kube-master
[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
+${kube_insecure_apiserver_address}
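
With the template variables defined in the `template_file` hunk above and the defaults from `terraform.tfvars`, this section of the generated `inventory/hosts` would render roughly as (FQDN placeholder assumed):

```
[k8s-cluster:vars]
apiserver_loadbalancer_domain_name="<aws_elb_api_fqdn>"
loadbalancer_apiserver.port=6443
kube_apiserver_insecure_bind_address: 0.0.0.0
```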

@@ -1,6 +1,5 @@
#Global Vars
aws_cluster_name = "devtest"
-aws_region = "eu-central-1"

#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"

@@ -28,5 +27,6 @@ aws_cluster_ami = "ami-903df7ff"
#Settings AWS ELB

-aws_elb_api_port = 443
+aws_elb_api_port = 6443
-k8s_secure_api_port = 443
+k8s_secure_api_port = 6443
+kube_insecure_apiserver_address = "0.0.0.0"

@@ -95,3 +95,7 @@ variable "aws_elb_api_port" {
variable "k8s_secure_api_port" {
|
variable "k8s_secure_api_port" {
|
||||||
description = "Secure Port of K8S API Server"
|
description = "Secure Port of K8S API Server"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "kube_insecure_apiserver_address" {
|
||||||
|
description= "Bind Address for insecure Port of K8s API Server"
|
||||||
|
}
|
||||||
|
|
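
Since the new variable appears to ship without a default, it has to come from `terraform.tfvars` (as above) or be overridden ad hoc, for instance:

```
terraform apply -var 'kube_insecure_apiserver_address=127.0.0.1' --var-file="credentials.tfvars"
```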