diff --git a/contrib/terraform/aws/.gitignore b/contrib/terraform/aws/.gitignore
index 28901e146..84fcb5821 100644
--- a/contrib/terraform/aws/.gitignore
+++ b/contrib/terraform/aws/.gitignore
@@ -1,2 +1,2 @@
 *.tfstate*
-inventory
+.terraform
diff --git a/contrib/terraform/aws/00-create-infrastructure.tf b/contrib/terraform/aws/00-create-infrastructure.tf
deleted file mode 100755
index 09cfac37c..000000000
--- a/contrib/terraform/aws/00-create-infrastructure.tf
+++ /dev/null
@@ -1,261 +0,0 @@
-variable "deploymentName" {
-  type = "string"
-  description = "The desired name of your deployment."
-}
-
-variable "numControllers"{
-  type = "string"
-  description = "Desired # of controllers."
-}
-
-variable "numEtcd" {
-  type = "string"
-  description = "Desired # of etcd nodes. Should be an odd number."
-}
-
-variable "numNodes" {
-  type = "string"
-  description = "Desired # of nodes."
-}
-
-variable "volSizeController" {
-  type = "string"
-  description = "Volume size for the controllers (GB)."
-}
-
-variable "volSizeEtcd" {
-  type = "string"
-  description = "Volume size for etcd (GB)."
-}
-
-variable "volSizeNodes" {
-  type = "string"
-  description = "Volume size for nodes (GB)."
-}
-
-variable "subnet" {
-  type = "string"
-  description = "The subnet in which to put your cluster."
-}
-
-variable "securityGroups" {
-  type = "string"
-  description = "The sec. groups in which to put your cluster."
-}
-
-variable "ami"{
-  type = "string"
-  description = "AMI to use for all VMs in cluster."
-}
-
-variable "SSHKey" {
-  type = "string"
-  description = "SSH key to use for VMs."
-}
-
-variable "master_instance_type" {
-  type = "string"
-  description = "Size of VM to use for masters."
-}
-
-variable "etcd_instance_type" {
-  type = "string"
-  description = "Size of VM to use for etcd."
-}
-
-variable "node_instance_type" {
-  type = "string"
-  description = "Size of VM to use for nodes."
-}
-} - -variable "terminate_protect" { - type = "string" - default = "false" -} - -variable "awsRegion" { - type = "string" -} - -provider "aws" { - region = "${var.awsRegion}" -} - -variable "iam_prefix" { - type = "string" - description = "Prefix name for IAM profiles" -} - -resource "aws_iam_instance_profile" "kubernetes_master_profile" { - name = "${var.iam_prefix}_kubernetes_master_profile" - roles = ["${aws_iam_role.kubernetes_master_role.name}"] -} - -resource "aws_iam_role" "kubernetes_master_role" { - name = "${var.iam_prefix}_kubernetes_master_role" - assume_role_policy = < inventory" - } - provisioner "local-exec" { - command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory" - } - - ##Create ETCD Inventory - provisioner "local-exec" { - command = "echo \"\n[etcd]\" >> inventory" - } - provisioner "local-exec" { - command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory" - } - - ##Create Nodes Inventory - provisioner "local-exec" { - command = "echo \"\n[kube-node]\" >> inventory" - } - provisioner "local-exec" { - command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory" - } - - provisioner "local-exec" { - command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\" >> inventory" - } -} diff --git a/contrib/terraform/aws/README.md b/contrib/terraform/aws/README.md index c7ede59ef..03bc4e23e 100644 --- a/contrib/terraform/aws/README.md +++ b/contrib/terraform/aws/README.md @@ -2,27 +2,34 @@ **Overview:** -- This will create nodes in a VPC inside of AWS +This project will create: +* VPC with Public and Private Subnets in # Availability Zones +* Bastion Hosts and NAT Gateways in the Public Subnet +* A dynamic number of masters, etcd, and worker nodes in the Private Subnet + * even distributed over the # of Availability Zones +* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet -- A dynamic number of masters, etcd, and nodes can be created - -- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. Will be looking into how to work around this later. +**Requirements** +- Terraform 0.8.7 or newer **How to Use:** -- Export the variables for your Amazon credentials: +- Export the variables for your AWS credentials or edit credentials.tfvars: ``` -export AWS_ACCESS_KEY_ID="xxx" -export AWS_SECRET_ACCESS_KEY="yyy" +export aws_access_key="xxx" +export aws_secret_key="yyy" +export aws_ssh_key_name="zzz" ``` - Update contrib/terraform/aws/terraform.tfvars with your data -- Run with `terraform apply` +- Run with `terraform apply -var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials -- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag. +- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag. -**Future Work:** +**Architecture** -- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now, using terraform.py or something may make sense in the future. 
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
new file mode 100644
index 000000000..14da95492
--- /dev/null
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -0,0 +1,185 @@
+terraform {
+  required_version = ">= 0.8.7"
+}
+
+provider "aws" {
+  access_key = "${var.AWS_ACCESS_KEY_ID}"
+  secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
+  region = "${var.AWS_DEFAULT_REGION}"
+}
+
+/*
+* Calling modules that create the initial AWS VPC / AWS ELB
+* and AWS IAM Roles for the Kubernetes deployment
+*/
+
+module "aws-vpc" {
+  source = "modules/vpc"
+
+  aws_cluster_name = "${var.aws_cluster_name}"
+  aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
+  aws_avail_zones = "${var.aws_avail_zones}"
+
+  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
+  aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
+}
+
+module "aws-elb" {
+  source = "modules/elb"
+
+  aws_cluster_name = "${var.aws_cluster_name}"
+  aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
+  aws_avail_zones = "${var.aws_avail_zones}"
+  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
+  aws_elb_api_port = "${var.aws_elb_api_port}"
+  k8s_secure_api_port = "${var.k8s_secure_api_port}"
+}
+
+module "aws-iam" {
+  source = "modules/iam"
+
+  aws_cluster_name = "${var.aws_cluster_name}"
+}
+
+/*
+* Create Bastion Instances in AWS
+*
+*/
+resource "aws_instance" "bastion-server" {
+  ami = "${var.aws_bastion_ami}"
+  instance_type = "${var.aws_bastion_size}"
+  count = "${length(var.aws_cidr_subnets_public)}"
+  associate_public_ip_address = true
+  availability_zone = "${element(var.aws_avail_zones,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
+
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+
+  key_name = "${var.AWS_SSH_KEY_NAME}"
+
+  tags {
+    Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
+    Cluster = "${var.aws_cluster_name}"
+    Role = "bastion-${var.aws_cluster_name}-${count.index}"
+  }
+}
+
+/*
+* Create K8s Master and worker nodes and etcd instances
+*
+*/
+resource "aws_instance" "k8s-master" {
+  ami = "${var.aws_cluster_ami}"
+  instance_type = "${var.aws_kube_master_size}"
+
+  count = "${var.aws_kube_master_num}"
+
+  availability_zone = "${element(var.aws_avail_zones,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+
+  iam_instance_profile = "${module.aws-iam.kube-master-profile}"
+  key_name = "${var.AWS_SSH_KEY_NAME}"
+
+  tags {
+    Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
+    Cluster = "${var.aws_cluster_name}"
+    Role = "master"
+  }
+}
+
+resource "aws_elb_attachment" "attach_master_nodes" {
+  count = "${var.aws_kube_master_num}"
+  elb = "${module.aws-elb.aws_elb_api_id}"
+  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
+}
+
+resource "aws_instance" "k8s-etcd" {
+  ami = "${var.aws_cluster_ami}"
+  instance_type = "${var.aws_etcd_size}"
+
+  count = "${var.aws_etcd_num}"
+
+  availability_zone = "${element(var.aws_avail_zones,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+
+  key_name = "${var.AWS_SSH_KEY_NAME}"
+
+  tags {
+    Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
+    Cluster = "${var.aws_cluster_name}"
+    Role = "etcd"
+  }
+}
+
+resource "aws_instance" "k8s-worker" {
+  ami = "${var.aws_cluster_ami}"
+  instance_type = "${var.aws_kube_worker_size}"
+
+  count = "${var.aws_kube_worker_num}"
+
+  availability_zone = "${element(var.aws_avail_zones,count.index)}"
+  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+
+  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+
+  iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
+  key_name = "${var.AWS_SSH_KEY_NAME}"
+
+  tags {
+    Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
+    Cluster = "${var.aws_cluster_name}"
+    Role = "worker"
+  }
+}
+
+/*
+* Create Kargo Inventory File
+*
+*/
+data "template_file" "inventory" {
+  template = "${file("${path.module}/templates/inventory.tpl")}"
+
+  vars {
+    public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s", aws_instance.bastion-server.*.public_ip))}"
+    connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
+    connection_strings_node = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
+    connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
+    list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
+    list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
+    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
+    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
+    elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
+  }
+}
+
+resource "null_resource" "inventories" {
+  provisioner "local-exec" {
+    command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
+  }
+}
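The `template_file` data source above renders `templates/inventory.tpl`, which this diff does not include. A sketch of a template consistent with the vars passed to it; the group names are an assumption carried over from the deleted inline-echo inventory (`[kube-master]`, `[etcd]`, `[kube-node]`, `[k8s-cluster:children]`), so treat this as an illustration rather than the shipped file:

```
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}
${public_ip_address_bastion}

[kube-master]
${list_master}

[etcd]
${list_etcd}

[kube-node]
${list_node}

[k8s-cluster:children]
kube-node
kube-master

[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
```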
"${var.AWS_SSH_KEY_NAME}" + + + tags { + Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}" + Cluster = "${var.aws_cluster_name}" + Role = "etcd" + } + +} + + +resource "aws_instance" "k8s-worker" { + ami = "${var.aws_cluster_ami}" + instance_type = "${var.aws_kube_worker_size}" + + count = "${var.aws_kube_worker_num}" + + availability_zone = "${element(var.aws_avail_zones,count.index)}" + subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}" + + vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ] + + iam_instance_profile = "${module.aws-iam.kube-worker-profile}" + key_name = "${var.AWS_SSH_KEY_NAME}" + + + tags { + Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}" + Cluster = "${var.aws_cluster_name}" + Role = "worker" + } + +} + + + +/* +* Create Kargo Inventory File +* +*/ +data "template_file" "inventory" { + template = "${file("${path.module}/templates/inventory.tpl")}" + + vars { + public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}" + connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" + connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}" + connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}" + list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}" + list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}" + list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}" + elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" + elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}" + + } +} + +resource "null_resource" "inventories" { + provisioner "local-exec" { + command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts" + } + +} diff --git a/contrib/terraform/aws/credentials.tfvars.example b/contrib/terraform/aws/credentials.tfvars.example new file mode 100644 index 000000000..19420c5a7 --- /dev/null +++ b/contrib/terraform/aws/credentials.tfvars.example @@ -0,0 +1,8 @@ +#AWS Access Key +AWS_ACCESS_KEY_ID = "" +#AWS Secret Key +AWS_SECRET_ACCESS_KEY = "" +#EC2 SSH Key Name +AWS_SSH_KEY_NAME = "" +#AWS Region +AWS_DEFAULT_REGION = "eu-central-1" diff --git a/contrib/terraform/aws/docs/aws_kargo.png b/contrib/terraform/aws/docs/aws_kargo.png new file mode 100644 index 000000000..40245b845 Binary files /dev/null and b/contrib/terraform/aws/docs/aws_kargo.png differ diff --git a/contrib/terraform/aws/modules/elb/main.tf b/contrib/terraform/aws/modules/elb/main.tf new file mode 100644 index 000000000..270e0cb35 --- /dev/null +++ b/contrib/terraform/aws/modules/elb/main.tf @@ -0,0 +1,50 @@ +resource "aws_security_group" "aws-elb" { + name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb" + vpc_id = "${var.aws_vpc_id}" + + tags { + Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb" + } +} + + +resource "aws_security_group_rule" "aws-allow-api-access" { + type = "ingress" + from_port = "${var.aws_elb_api_port}" + to_port = "${var.k8s_secure_api_port}" + protocol = "TCP" + cidr_blocks = ["0.0.0.0/0"] + security_group_id = "${aws_security_group.aws-elb.id}" +} + + +# Create a new AWS ELB for K8S API +resource "aws_elb" "aws-elb-api" { + name = 
"kubernetes-elb-${var.aws_cluster_name}" + subnets = ["${var.aws_subnet_ids_public}"] + security_groups = ["${aws_security_group.aws-elb.id}"] + + listener { + instance_port = "${var.k8s_secure_api_port}" + instance_protocol = "tcp" + lb_port = "${var.aws_elb_api_port}" + lb_protocol = "tcp" + } + + health_check { + healthy_threshold = 2 + unhealthy_threshold = 2 + timeout = 3 + target = "HTTP:8080/" + interval = 30 + } + + cross_zone_load_balancing = true + idle_timeout = 400 + connection_draining = true + connection_draining_timeout = 400 + + tags { + Name = "kubernetes-${var.aws_cluster_name}-elb-api" + } +} diff --git a/contrib/terraform/aws/modules/elb/outputs.tf b/contrib/terraform/aws/modules/elb/outputs.tf new file mode 100644 index 000000000..075c751e4 --- /dev/null +++ b/contrib/terraform/aws/modules/elb/outputs.tf @@ -0,0 +1,7 @@ +output "aws_elb_api_id" { + value = "${aws_elb.aws-elb-api.id}" +} + +output "aws_elb_api_fqdn" { + value = "${aws_elb.aws-elb-api.dns_name}" +} diff --git a/contrib/terraform/aws/modules/elb/variables.tf b/contrib/terraform/aws/modules/elb/variables.tf new file mode 100644 index 000000000..c7f86847d --- /dev/null +++ b/contrib/terraform/aws/modules/elb/variables.tf @@ -0,0 +1,28 @@ +variable "aws_cluster_name" { + description = "Name of Cluster" +} + +variable "aws_vpc_id" { + description = "AWS VPC ID" +} + +variable "aws_elb_api_port" { + description = "Port for AWS ELB" +} + +variable "k8s_secure_api_port" { + description = "Secure Port of K8S API Server" +} + + + +variable "aws_avail_zones" { + description = "Availability Zones Used" + type = "list" +} + + +variable "aws_subnet_ids_public" { + description = "IDs of Public Subnets" + type = "list" +} diff --git a/contrib/terraform/aws/modules/iam/main.tf b/contrib/terraform/aws/modules/iam/main.tf new file mode 100644 index 000000000..88da00d90 --- /dev/null +++ b/contrib/terraform/aws/modules/iam/main.tf @@ -0,0 +1,138 @@ +#Add AWS Roles for Kubernetes + +resource "aws_iam_role" "kube-master" { + name = "kubernetes-${var.aws_cluster_name}-master" + assume_role_policy = <