Support Openstack servergroups (#5412)
* Add support for Nova servergroups
* Add documentation for OpenStack Nova servergroups
* Update to TF 0.12.12 format and fix etcd
* Revert for_each change
* Fix variables and formatting in main.tf
* Try to avoid errors
* Update variable
* Update main.tf
* Update main.tf
* Update all other instance resources
Parent: b15d41a96a
Commit: 40e35b3fa6
5 changed files with 141 additions and 1 deletion
@@ -253,6 +253,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
 |`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
 |`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
+|`use_server_groups` | Create and use openstack nova servergroups, default: false |

 #### Terraform state files
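To opt in, set the new flag in `inventory/$CLUSTER/cluster.tfvars`. A minimal sketch (only the new variable is shown; everything else in your tfvars stays as before):

    # cluster.tfvars: create Nova server groups and schedule instances with anti-affinity
    use_server_groups = true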
@@ -71,6 +71,7 @@ module "compute" {
   worker_allowed_ports = "${var.worker_allowed_ports}"
   wait_for_floatingip  = "${var.wait_for_floatingip}"
   use_access_ip        = "${var.use_access_ip}"
+  use_server_groups    = "${var.use_server_groups}"

   network_id = "${module.network.router_id}"
 }
@@ -95,6 +95,24 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
   security_group_id = "${openstack_networking_secgroup_v2.worker.id}"
 }

+resource "openstack_compute_servergroup_v2" "k8s_master" {
+  count    = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name     = "k8s-master-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_node" {
+  count    = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name     = "k8s-node-srvgrp"
+  policies = ["anti-affinity"]
+}
+
+resource "openstack_compute_servergroup_v2" "k8s_etcd" {
+  count    = "%{ if var.use_server_groups }1%{else}0%{endif}"
+  name     = "k8s-etcd-srvgrp"
+  policies = ["anti-affinity"]
+}
+
 resource "openstack_compute_instance_v2" "bastion" {
   name  = "${var.cluster_name}-bastion-${count.index+1}"
   count = "${var.bastion_root_volume_size_in_gb == 0 ? var.number_of_bastions : 0}"
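The `%{ if ... }` string directive renders `count` as the string "1" or "0", which Terraform coerces to a number. Under Terraform 0.12 the more idiomatic spelling is a conditional expression; an equivalent sketch of the first resource (behaviorally the same, not what this commit ships):

    resource "openstack_compute_servergroup_v2" "k8s_master" {
      count    = var.use_server_groups ? 1 : 0
      name     = "k8s-master-srvgrp"
      policies = ["anti-affinity"]
    }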
@@ -174,6 +192,13 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user         = "${var.ssh_user}"
     kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
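The `dynamic "scheduler_hints"` block iterates over a list that has either one element or none, so the nested block is emitted at most once. When `use_server_groups = true`, the block above renders to the equivalent of this static form (a sketch for illustration):

    scheduler_hints {
      group = openstack_compute_servergroup_v2.k8s_master[0].id
    }

When the flag is false, `for_each` receives an empty list and no scheduler hint is sent to Nova. The same dynamic block is repeated in each instance resource below, pointing at the `k8s_master`, `k8s_etcd`, or `k8s_node` server group as appropriate.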
@@ -210,6 +235,13 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -238,6 +270,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -276,6 +315,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user         = "${var.ssh_user}"
     kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
@@ -302,6 +348,13 @@ resource "openstack_compute_instance_v2" "etcd" {

   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user         = "${var.ssh_user}"
     kubespray_groups = "etcd,vault,no-floating"
@@ -332,6 +385,13 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
   }

   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_etcd[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -356,6 +416,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -389,6 +456,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -413,6 +487,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -446,6 +527,13 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
   security_groups = ["${openstack_networking_secgroup_v2.k8s_master.name}",
     "${openstack_networking_secgroup_v2.k8s.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_master[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -471,6 +559,13 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     "${openstack_networking_secgroup_v2.worker.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user         = "${var.ssh_user}"
     kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
@@ -507,6 +602,13 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -535,6 +637,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -568,6 +677,13 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}",
     "${openstack_networking_secgroup_v2.worker.name}",
   ]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user}"
@@ -658,6 +774,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   }

   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user_gfs}"
@@ -689,6 +812,13 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_v
   }

   security_groups = ["${openstack_networking_secgroup_v2.k8s.name}"]

+  dynamic "scheduler_hints" {
+    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    content {
+      group = "${openstack_compute_servergroup_v2.k8s_node[0].id}"
+    }
+  }
+
   metadata = {
     ssh_user = "${var.ssh_user_gfs}"
@@ -106,4 +106,8 @@ variable "worker_allowed_ports" {
   type = "list"
 }

 variable "use_access_ip" {}
+
+variable "use_server_groups" {
+  type = bool
+}
@@ -210,3 +210,7 @@ variable "worker_allowed_ports" {
 variable "use_access_ip" {
   default = 1
 }
+
+variable "use_server_groups" {
+  default = false
+}
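Note the split between the two declarations: the root `variables.tf` gives `use_server_groups` a `default = false`, so existing deployments keep their behavior, while the compute module's `variables.tf` declares it as a bare `type = bool` input that the root module must wire through explicitly. If both lived in one module, the combined declaration would presumably look like this (a hypothetical sketch, not part of this commit):

    variable "use_server_groups" {
      type    = bool
      default = false
    }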