Run terraform fmt and add step to CI (#4405)

* Run terraform fmt

* Add terraform fmt to .terraform-validate CI step

* Add tf-validate-aws CI step

* Revert "Add tf-validate-aws CI step"

This reverts commit e007225fac.
Authored by Andreas Holmsten on 2019-04-08 11:22:24 +02:00; committed by Kubernetes Prow Robot
parent 29825e6873
commit 01cf11b961
16 changed files with 231 additions and 261 deletions
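To reproduce the new CI check locally before pushing, a minimal sketch using the same flags the job runs (assumes Terraform is on PATH and the repository root as working directory; the aws directory stands in for whichever provider directory is being edited):

# Print a diff and exit non-zero if any file is not canonically formatted
terraform fmt -check -diff contrib/terraform/aws

# Rewrite the files in place so the check passes
terraform fmt contrib/terraform/aws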

View file

@@ -769,6 +769,7 @@ tox-inventory-builder:
stage: unit-tests
script:
- terraform validate -var-file=cluster.tf ../../contrib/terraform/$PROVIDER
+ - terraform fmt -check -diff ../../contrib/terraform/$PROVIDER
.terraform_apply: &terraform_apply
<<: *terraform_install

View file

@@ -20,31 +20,28 @@ module "aws-vpc" {
aws_cluster_name = "${var.aws_cluster_name}"
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
- aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
- aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
- aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
- default_tags="${var.default_tags}"
+ aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+ aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
+ aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
+ default_tags = "${var.default_tags}"
}
module "aws-elb" {
source = "modules/elb"
- aws_cluster_name="${var.aws_cluster_name}"
- aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
- aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
- aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
+ aws_cluster_name = "${var.aws_cluster_name}"
+ aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
+ aws_avail_zones = "${slice(data.aws_availability_zones.available.names,0,2)}"
+ aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
aws_elb_api_port = "${var.aws_elb_api_port}"
k8s_secure_api_port = "${var.k8s_secure_api_port}"
- default_tags="${var.default_tags}"
+ default_tags = "${var.default_tags}"
}
module "aws-iam" {
source = "modules/iam"
- aws_cluster_name="${var.aws_cluster_name}"
+ aws_cluster_name = "${var.aws_cluster_name}"
}
/*
@@ -60,8 +57,7 @@ resource "aws_instance" "bastion-server" {
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
- vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+ vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
key_name = "${var.AWS_SSH_KEY_NAME}"
@@ -72,7 +68,6 @@ resource "aws_instance" "bastion-server" {
))}"
}
/*
* Create K8s Master and worker nodes and etcd instances
*
@@ -84,18 +79,14 @@ resource "aws_instance" "k8s-master" {
count = "${var.aws_kube_master_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
- vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+ vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
iam_instance_profile = "${module.aws-iam.kube-master-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
@@ -109,19 +100,16 @@ resource "aws_elb_attachment" "attach_master_nodes" {
instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}
resource "aws_instance" "k8s-etcd" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_etcd_size}"
count = "${var.aws_etcd_num}"
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
- vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+ vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
key_name = "${var.AWS_SSH_KEY_NAME}"
@@ -130,10 +118,8 @@ resource "aws_instance" "k8s-etcd" {
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "etcd"
))}"
}
resource "aws_instance" "k8s-worker" {
ami = "${data.aws_ami.distro.id}"
instance_type = "${var.aws_kube_worker_size}"
@@ -143,22 +129,18 @@ resource "aws_instance" "k8s-worker" {
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
- vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
+ vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
key_name = "${var.AWS_SSH_KEY_NAME}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
"Role", "worker"
))}"
}
/*
* Create Kubespray Inventory File
*
@@ -176,7 +158,6 @@ data "template_file" "inventory" {
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
}
}
resource "null_resource" "inventories" {
@@ -187,5 +168,4 @@ resource "null_resource" "inventories" {
triggers {
template = "${data.template_file.inventory.rendered}"
}
}

View file

@@ -7,7 +7,6 @@ resource "aws_security_group" "aws-elb" {
))}"
}
resource "aws_security_group_rule" "aws-allow-api-access" {
type = "ingress"
from_port = "${var.aws_elb_api_port}"

View file

@@ -14,14 +14,11 @@ variable "k8s_secure_api_port" {
description = "Secure Port of K8S API Server"
}
variable "aws_avail_zones" {
description = "Availability Zones Used"
type = "list"
}
variable "aws_subnet_ids_public" {
description = "IDs of Public Subnets"
type = "list"

View file

@@ -2,6 +2,7 @@
resource "aws_iam_role" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
@@ -20,6 +21,7 @@ EOF
resource "aws_iam_role" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
@@ -41,6 +43,7 @@ EOF
resource "aws_iam_role_policy" "kube-master" {
name = "kubernetes-${var.aws_cluster_name}-master"
role = "${aws_iam_role.kube-master.id}"
policy = <<EOF
{
"Version": "2012-10-17",
@@ -75,6 +78,7 @@ EOF
resource "aws_iam_role_policy" "kube-worker" {
name = "kubernetes-${var.aws_cluster_name}-node"
role = "${aws_iam_role.kube-worker.id}"
policy = <<EOF
{
"Version": "2012-10-17",
@@ -124,7 +128,6 @@ resource "aws_iam_role_policy" "kube-worker" {
EOF
}
#Create AWS Instance Profiles
resource "aws_iam_instance_profile" "kube-master" {

View file

@@ -1,4 +1,3 @@
resource "aws_vpc" "cluster-vpc" {
cidr_block = "${var.aws_vpc_cidr_block}"
@@ -11,17 +10,14 @@ resource "aws_vpc" "cluster-vpc" {
))}"
}
resource "aws_eip" "cluster-nat-eip" {
count = "${length(var.aws_cidr_subnets_public)}"
vpc = true
}
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
))}"
@@ -29,7 +25,7 @@ resource "aws_internet_gateway" "cluster-vpc-internetgw" {
resource "aws_subnet" "cluster-vpc-subnets-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
- count="${length(var.aws_avail_zones)}"
+ count = "${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
@@ -43,12 +39,11 @@ resource "aws_nat_gateway" "cluster-nat-gateway" {
count = "${length(var.aws_cidr_subnets_public)}"
allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}
resource "aws_subnet" "cluster-vpc-subnets-private" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
- count="${length(var.aws_avail_zones)}"
+ count = "${length(var.aws_avail_zones)}"
availability_zone = "${element(var.aws_avail_zones, count.index)}"
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
@@ -63,6 +58,7 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
resource "aws_route_table" "kubernetes-public" {
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
@@ -76,6 +72,7 @@ resource "aws_route_table" "kubernetes-public" {
resource "aws_route_table" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
vpc_id = "${aws_vpc.cluster-vpc.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
@@ -84,24 +81,20 @@ resource "aws_route_table" "kubernetes-private" {
tags = "${merge(var.default_tags, map(
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
))}"
}
resource "aws_route_table_association" "kubernetes-public" {
count = "${length(var.aws_cidr_subnets_public)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
route_table_id = "${aws_route_table.kubernetes-public.id}"
}
resource "aws_route_table_association" "kubernetes-private" {
count = "${length(var.aws_cidr_subnets_private)}"
subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
}
#Kubernetes Security Groups
resource "aws_security_group" "kubernetes" {
@@ -118,7 +111,7 @@ resource "aws_security_group_rule" "allow-all-ingress" {
from_port = 0
to_port = 65535
protocol = "-1"
- cidr_blocks= ["${var.aws_vpc_cidr_block}"]
+ cidr_blocks = ["${var.aws_vpc_cidr_block}"]
security_group_id = "${aws_security_group.kubernetes.id}"
}
@@ -131,7 +124,6 @@ resource "aws_security_group_rule" "allow-all-egress" {
security_group_id = "${aws_security_group.kubernetes.id}"
}
resource "aws_security_group_rule" "allow-ssh-connections" {
type = "ingress"
from_port = 22

View file

@@ -12,10 +12,8 @@ output "aws_subnet_ids_public" {
output "aws_security_group" {
value = ["${aws_security_group.kubernetes.*.id}"]
}
output "default_tags" {
value = "${var.default_tags}"
}

View file

@@ -2,12 +2,10 @@ variable "aws_vpc_cidr_block" {
description = "CIDR Blocks for AWS VPC"
}
variable "aws_cluster_name" {
description = "Name of Cluster"
}
variable "aws_avail_zones" {
description = "AWS Availability Zones Used"
type = "list"

View file

@@ -14,7 +14,6 @@ output "etcd" {
value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}
output "aws_elb_api_fqdn" {
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}

View file

@@ -1,6 +1,5 @@
# Configure the Packet Provider
- provider "packet" {
- }
+ provider "packet" {}
resource "packet_ssh_key" "k8s" {
count = "${var.public_key_path != "" ? 1 : 0}"
@@ -19,7 +18,6 @@ resource "packet_device" "k8s_master" {
billing_cycle = "${var.billing_cycle}"
project_id = "${var.packet_project_id}"
tags = ["cluster-${var.cluster_name}", "k8s-cluster", "kube-master", "etcd", "kube-node"]
}
resource "packet_device" "k8s_master_no_etcd" {

View file

@@ -14,14 +14,19 @@ facility = "ewr1"
# standalone etcds
number_of_etcd = 0
plan_etcd = "t1.small.x86"
# masters
number_of_k8s_masters = 1
number_of_k8s_masters_no_etcd = 0
plan_k8s_masters = "t1.small.x86"
plan_k8s_masters_no_etcd = "t1.small.x86"
# nodes
number_of_k8s_nodes = 2
plan_k8s_nodes = "t1.small.x86"