fix(misc): contrib/terraform/aws (#7818)

* fix(misc): terraform/aws

- handles deployment with a single availability zone
- handles deployment with more than two availability zones
- handles etcd collocation with control-plane nodes (`aws_etcd_num=0`)
- allows setting the bastion instance count (`aws_bastion_num`)
- allows setting the bastion/etcd/control-plane/worker rootfs volume sizes
- removes variables from terraform.tfvars that were no longer used
- adds .terraform.lock.hcl to .gitignore
- changes the base image from ubuntu-18.04 to debian-10

Tested by a few coworkers of mine, and myself: thanks for the outstanding
work on both those Terraform samples and the Kubespray playbooks.
I did not test Ubuntu deployments; I could still swap from buster to
focal. LMK.

* fix(gitlab-ci)

AFAIU, the terraform.tfvars indentation should be fixed so that
`terraform fmt -check -diff` returns no diff.

https://gitlab.com/kargo-ci/kubernetes-sigs-kubespray/-/jobs/1445622114
Samuel authored 2021-07-23 11:43:16 +02:00, committed by GitHub
parent 56e230863a
commit bfebcfa2c5
8 changed files with 84 additions and 41 deletions


@@ -1,2 +1,3 @@
 *.tfstate*
+.terraform.lock.hcl
 .terraform


@@ -20,7 +20,7 @@ module "aws-vpc" {
   aws_cluster_name         = var.aws_cluster_name
   aws_vpc_cidr_block       = var.aws_vpc_cidr_block
-  aws_avail_zones          = slice(data.aws_availability_zones.available.names, 0, 2)
+  aws_avail_zones          = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
   aws_cidr_subnets_private = var.aws_cidr_subnets_private
   aws_cidr_subnets_public  = var.aws_cidr_subnets_public
   default_tags             = var.default_tags
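
Note: the ternary in the new aws_avail_zones expression just caps the number of AZs used at the number of public subnets. On Terraform 0.12+, the built-in min() function would express the same thing more readably; a sketch of the equivalent, not what this commit ships:

locals {
  # use as many AZs as there are public subnets, but never more than the region offers
  az_count    = min(length(var.aws_cidr_subnets_public), length(data.aws_availability_zones.available.names))
  avail_zones = slice(data.aws_availability_zones.available.names, 0, local.az_count)
}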
@@ -31,7 +31,7 @@ module "aws-elb" {
   aws_cluster_name      = var.aws_cluster_name
   aws_vpc_id            = module.aws-vpc.aws_vpc_id
-  aws_avail_zones       = slice(data.aws_availability_zones.available.names, 0, 2)
+  aws_avail_zones       = slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names))
   aws_subnet_ids_public = module.aws-vpc.aws_subnet_ids_public
   aws_elb_api_port      = var.aws_elb_api_port
   k8s_secure_api_port   = var.k8s_secure_api_port
@@ -52,9 +52,9 @@ module "aws-iam" {
 resource "aws_instance" "bastion-server" {
   ami                         = data.aws_ami.distro.id
   instance_type               = var.aws_bastion_size
-  count                       = length(var.aws_cidr_subnets_public)
+  count                       = var.aws_bastion_num
   associate_public_ip_address = true
-  availability_zone           = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  availability_zone           = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
   subnet_id                   = element(module.aws-vpc.aws_subnet_ids_public, count.index)
   vpc_security_group_ids      = module.aws-vpc.aws_security_group
@@ -79,11 +79,15 @@ resource "aws_instance" "k8s-master" {
   count = var.aws_kube_master_num

-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
   subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)

   vpc_security_group_ids = module.aws-vpc.aws_security_group

+  root_block_device {
+    volume_size = var.aws_kube_master_disk_size
+  }
+
   iam_instance_profile = module.aws-iam.kube_control_plane-profile
   key_name             = var.AWS_SSH_KEY_NAME
@@ -106,11 +110,15 @@ resource "aws_instance" "k8s-etcd" {
   count = var.aws_etcd_num

-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
   subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)

   vpc_security_group_ids = module.aws-vpc.aws_security_group

+  root_block_device {
+    volume_size = var.aws_etcd_disk_size
+  }
+
   key_name = var.AWS_SSH_KEY_NAME

   tags = merge(var.default_tags, tomap({
@@ -126,11 +134,15 @@ resource "aws_instance" "k8s-worker" {
   count = var.aws_kube_worker_num

-  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)
+  availability_zone = element(slice(data.aws_availability_zones.available.names, 0, length(var.aws_cidr_subnets_public) <= length(data.aws_availability_zones.available.names) ? length(var.aws_cidr_subnets_public) : length(data.aws_availability_zones.available.names)), count.index)
   subnet_id         = element(module.aws-vpc.aws_subnet_ids_private, count.index)

   vpc_security_group_ids = module.aws-vpc.aws_security_group

+  root_block_device {
+    volume_size = var.aws_kube_worker_disk_size
+  }
+
   iam_instance_profile = module.aws-iam.kube-worker-profile
   key_name             = var.AWS_SSH_KEY_NAME
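
Note: root_block_device controls the instance's root volume; when the block is omitted, AWS falls back to the size baked into the AMI snapshot. A minimal self-contained sketch of how the new disk-size variables are wired end to end (the resource name and instance type here are illustrative, not from the commit):

variable "aws_kube_worker_disk_size" {
  description = "Disk size for Kubernetes Worker Nodes (in GiB)"
}

resource "aws_instance" "worker-example" {
  ami           = data.aws_ami.distro.id
  instance_type = "t3.medium"

  # size the root filesystem explicitly instead of inheriting the AMI default
  root_block_device {
    volume_size = var.aws_kube_worker_disk_size
  }
}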
@@ -152,10 +164,10 @@ data "template_file" "inventory" {
     public_ip_address_bastion = join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))
     connection_strings_master = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.private_dns, aws_instance.k8s-master.*.private_ip))
     connection_strings_node   = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.private_dns, aws_instance.k8s-worker.*.private_ip))
-    connection_strings_etcd   = join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.private_dns, aws_instance.k8s-etcd.*.private_ip))
     list_master               = join("\n", aws_instance.k8s-master.*.private_dns)
     list_node                 = join("\n", aws_instance.k8s-worker.*.private_dns)
-    list_etcd                 = join("\n", aws_instance.k8s-etcd.*.private_dns)
+    connection_strings_etcd   = join("\n", formatlist("%s ansible_host=%s", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)), ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip))))
+    list_etcd                 = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_dns) : (aws_instance.k8s-master.*.private_dns)))
     elb_api_fqdn              = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
   }
 }
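
Note: these (var.aws_etcd_num > 0) conditionals are what make `aws_etcd_num=0` collocate etcd with the control plane: every etcd reference in the generated inventory falls back to the master nodes. The same logic factored into locals, purely as a readability sketch (these local names are hypothetical, not part of the commit):

locals {
  # pick the etcd host pool once; masters double as etcd when aws_etcd_num = 0
  etcd_dns = var.aws_etcd_num > 0 ? aws_instance.k8s-etcd.*.private_dns : aws_instance.k8s-master.*.private_dns
  etcd_ips = var.aws_etcd_num > 0 ? aws_instance.k8s-etcd.*.private_ip : aws_instance.k8s-master.*.private_ip
}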


@@ -11,7 +11,7 @@ output "workers" {
 }

 output "etcd" {
-  value = join("\n", aws_instance.k8s-etcd.*.private_ip)
+  value = join("\n", ((var.aws_etcd_num > 0) ? (aws_instance.k8s-etcd.*.private_ip) : (aws_instance.k8s-master.*.private_ip)))
 }

 output "aws_elb_api_fqdn" {


@@ -9,6 +9,8 @@ aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
 aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"]

 #Bastion Host
+aws_bastion_num  = 1
 aws_bastion_size = "t2.medium"

 #Kubernetes Cluster
@@ -17,22 +19,26 @@ aws_kube_master_num = 3
 aws_kube_master_size      = "t2.medium"
+aws_kube_master_disk_size = 50

 aws_etcd_num  = 3
 aws_etcd_size = "t2.medium"
+aws_etcd_disk_size = 50

 aws_kube_worker_num  = 4
 aws_kube_worker_size = "t2.medium"
+aws_kube_worker_disk_size = 50

 #Settings AWS ELB
 aws_elb_api_port    = 6443
 k8s_secure_api_port = 6443
-kube_insecure_apiserver_address = "0.0.0.0"

 default_tags = {
   # Env = "devtest"
   # Product = "kubernetes"
 }


@@ -10,19 +10,18 @@ ${public_ip_address_bastion}
 [kube_control_plane]
 ${list_master}

 [kube_node]
 ${list_node}

 [etcd]
 ${list_etcd}

+[calico_rr]
+
 [k8s_cluster:children]
 kube_node
 kube_control_plane
+calico_rr

 [k8s_cluster:vars]
 ${elb_api_fqdn}


@@ -6,26 +6,34 @@ aws_vpc_cidr_block = "10.250.192.0/18"
 aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"]
 aws_cidr_subnets_public  = ["10.250.224.0/20", "10.250.240.0/20"]

+# single AZ deployment
+#aws_cidr_subnets_private = ["10.250.192.0/20"]
+#aws_cidr_subnets_public  = ["10.250.224.0/20"]
+
+# 3+ AZ deployment
+#aws_cidr_subnets_private = ["10.250.192.0/24","10.250.193.0/24","10.250.194.0/24","10.250.195.0/24"]
+#aws_cidr_subnets_public  = ["10.250.224.0/24","10.250.225.0/24","10.250.226.0/24","10.250.227.0/24"]
+
 #Bastion Host
-aws_bastion_size = "t2.medium"
+aws_bastion_num  = 1
+aws_bastion_size = "t3.small"

 #Kubernetes Cluster
 aws_kube_master_num       = 3
-aws_kube_master_size      = "t2.medium"
+aws_kube_master_size      = "t3.medium"
+aws_kube_master_disk_size = 50

-aws_etcd_num  = 3
-aws_etcd_size = "t2.medium"
+aws_etcd_num       = 0
+aws_etcd_size      = "t3.medium"
+aws_etcd_disk_size = 50

 aws_kube_worker_num  = 4
-aws_kube_worker_size = "t2.medium"
+aws_kube_worker_size      = "t3.medium"
+aws_kube_worker_disk_size = 50

 #Settings AWS ELB
 aws_elb_api_port    = 6443
 k8s_secure_api_port = 6443
-kube_insecure_apiserver_address = "0.0.0.0"

 default_tags = {
   # Env = "devtest"


@@ -8,25 +8,26 @@ aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
 aws_avail_zones = ["eu-central-1a","eu-central-1b"]

 #Bastion Host
-aws_bastion_ami  = "ami-5900cc36"
-aws_bastion_size = "t2.small"
+aws_bastion_num  = 1
+aws_bastion_size = "t3.small"

 #Kubernetes Cluster
 aws_kube_master_num       = 3
-aws_kube_master_size      = "t2.medium"
+aws_kube_master_size      = "t3.medium"
+aws_kube_master_disk_size = 50

 aws_etcd_num  = 3
-aws_etcd_size = "t2.medium"
+aws_etcd_size      = "t3.medium"
+aws_etcd_disk_size = 50

 aws_kube_worker_num  = 4
-aws_kube_worker_size = "t2.medium"
+aws_kube_worker_size      = "t3.medium"
+aws_kube_worker_disk_size = 50

-aws_cluster_ami = "ami-903df7ff"

 #Settings AWS ELB
 aws_elb_api_port    = 6443
 k8s_secure_api_port = 6443
-kube_insecure_apiserver_address = 0.0.0.0

 default_tags = { }

 inventory_file = "../../../inventory/hosts"


@@ -25,7 +25,7 @@ data "aws_ami" "distro" {
   filter {
     name   = "name"
-    values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"]
+    values = ["debian-10-amd64-*"]
   }

   filter {
@@ -33,7 +33,7 @@ data "aws_ami" "distro" {
     values = ["hvm"]
   }

-  owners = ["099720109477"] # Canonical
+  owners = ["136693071363"] # Debian-10
 }

 //AWS VPC Variables
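
Note: 136693071363 is the AWS account that publishes the official Debian images, and debian-10-amd64-* matches their buster AMIs. For reference, the whole data source would look roughly like this after the change (the most_recent and virtualization-type lines are assumed from context; the hunks above do not show them):

data "aws_ami" "distro" {
  most_recent = true # assumed; not visible in this hunk

  filter {
    name   = "name"
    values = ["debian-10-amd64-*"]
  }

  filter {
    name   = "virtualization-type" # assumed; only the values line is visible above
    values = ["hvm"]
  }

  owners = ["136693071363"] # Debian-10
}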
@@ -63,10 +63,18 @@ variable "aws_bastion_size" {
  * The number should be divisable by the number of used
  * AWS Availability Zones without an remainder.
  */
+variable "aws_bastion_num" {
+  description = "Number of Bastion Nodes"
+}
+
 variable "aws_kube_master_num" {
   description = "Number of Kubernetes Master Nodes"
 }

+variable "aws_kube_master_disk_size" {
+  description = "Disk size for Kubernetes Master Nodes (in GiB)"
+}
+
 variable "aws_kube_master_size" {
   description = "Instance size of Kube Master Nodes"
 }
@@ -75,6 +83,10 @@ variable "aws_etcd_num" {
   description = "Number of etcd Nodes"
 }

+variable "aws_etcd_disk_size" {
+  description = "Disk size for etcd Nodes (in GiB)"
+}
+
 variable "aws_etcd_size" {
   description = "Instance size of etcd Nodes"
 }
@@ -83,6 +95,10 @@ variable "aws_kube_worker_num" {
   description = "Number of Kubernetes Worker Nodes"
 }

+variable "aws_kube_worker_disk_size" {
+  description = "Disk size for Kubernetes Worker Nodes (in GiB)"
+}
+
 variable "aws_kube_worker_size" {
   description = "Instance size of Kubernetes Worker Nodes"
 }