Add option to set different server group policy for etcd, node, and master server (#8046)
Parent: e35a87e3eb
Commit: fe0810aff9
5 changed files with 44 additions and 21 deletions
@@ -278,7 +278,9 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`gfs_root_volume_size_in_gb` | Size of the root volume for gluster, 0 to use ephemeral storage |
 |`etcd_root_volume_size_in_gb` | Size of the root volume for etcd nodes, 0 to use ephemeral storage |
 |`bastion_root_volume_size_in_gb` | Size of the root volume for bastions, 0 to use ephemeral storage |
-|`use_server_group` | Create and use openstack nova servergroups, default: false |
+|`master_server_group_policy` | Enable and use openstack nova servergroups for masters with set policy, default: "" (disabled) |
+|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
+|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
 |`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
 |`k8s_nodes` | Map containing worker node definition, see explanation below |

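As a usage sketch (not part of this diff), the new options would be set in `inventory/$CLUSTER/cluster.tfvars`. The policy names below are illustrative and must be policies your Nova installation accepts; `anti-affinity` is what the old hard-coded behaviour used.

```hcl
# Illustrative cluster.tfvars excerpt (not from this commit):
# a non-empty policy creates and uses a server group for that role,
# an empty string keeps the previous "no server group" behaviour.
master_server_group_policy = "anti-affinity"
node_server_group_policy   = "soft-anti-affinity"
etcd_server_group_policy   = "anti-affinity"
```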
@@ -81,7 +81,9 @@ module "compute" {
   worker_allowed_ports = var.worker_allowed_ports
   wait_for_floatingip = var.wait_for_floatingip
   use_access_ip = var.use_access_ip
-  use_server_groups = var.use_server_groups
+  master_server_group_policy = var.master_server_group_policy
+  node_server_group_policy = var.node_server_group_policy
+  etcd_server_group_policy = var.etcd_server_group_policy
   extra_sec_groups = var.extra_sec_groups
   extra_sec_groups_name = var.extra_sec_groups_name
   group_vars_path = var.group_vars_path
@@ -130,21 +130,21 @@ resource "openstack_networking_secgroup_rule_v2" "worker" {
 }

 resource "openstack_compute_servergroup_v2" "k8s_master" {
-  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count = var.master_server_group_policy != "" ? 1 : 0
   name = "k8s-master-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.master_server_group_policy]
 }

 resource "openstack_compute_servergroup_v2" "k8s_node" {
-  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count = var.node_server_group_policy != "" ? 1 : 0
   name = "k8s-node-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.node_server_group_policy]
 }

 resource "openstack_compute_servergroup_v2" "k8s_etcd" {
-  count = "%{if var.use_server_groups}1%{else}0%{endif}"
+  count = var.etcd_server_group_policy != "" ? 1 : 0
   name = "k8s-etcd-srvgrp"
-  policies = ["anti-affinity"]
+  policies = [var.etcd_server_group_policy]
 }

 locals {
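A quick way to see which groups a given configuration actually creates would be a hypothetical output (not in this commit) next to these resources; because `count` is driven by the policy string, an empty policy yields an empty splat list.

```hcl
# Hypothetical helper output, not part of this commit: lists the IDs of the
# server groups that were created (empty policy => no group => empty list).
output "server_group_ids" {
  value = {
    master = openstack_compute_servergroup_v2.k8s_master[*].id
    node   = openstack_compute_servergroup_v2.k8s_node[*].id
    etcd   = openstack_compute_servergroup_v2.k8s_etcd[*].id
  }
}
```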
@@ -237,7 +237,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   security_groups = local.master_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_master[0].id
     }

@@ -284,7 +284,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   security_groups = local.master_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_master[0].id
     }
@@ -329,7 +329,7 @@ resource "openstack_compute_instance_v2" "etcd" {
   security_groups = [openstack_networking_secgroup_v2.k8s.name]

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
+    for_each = var.etcd_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_etcd[0].id
     }
@@ -371,7 +371,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   security_groups = local.master_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_master[0].id
     }

@@ -413,7 +413,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   security_groups = local.master_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
+    for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_master[0].id
     }

@@ -454,7 +454,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
   security_groups = local.worker_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_node[0].id
     }

@@ -499,7 +499,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   security_groups = local.worker_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_node[0].id
     }

@@ -540,7 +540,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
   security_groups = local.worker_sec_groups

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_node[0].id
     }

@@ -585,7 +585,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   security_groups = [openstack_networking_secgroup_v2.k8s.name]

   dynamic "scheduler_hints" {
-    for_each = var.use_server_groups ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
+    for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
       group = openstack_compute_servergroup_v2.k8s_node[0].id
     }
@@ -124,8 +124,16 @@ variable "worker_allowed_ports" {

 variable "use_access_ip" {}

-variable "use_server_groups" {
-  type = bool
+variable "master_server_group_policy" {
+  type = string
+}
+
+variable "node_server_group_policy" {
+  type = string
+}
+
+variable "etcd_server_group_policy" {
+  type = string
 }

 variable "extra_sec_groups" {
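Since the module now accepts any string, a possible follow-up (not part of this commit) would be to validate the value against the policies Nova understands, sketched here for the master variable only.

```hcl
# Hypothetical variable validation, not part of this commit: fail "terraform plan"
# early if the policy is neither empty nor a policy Nova recognises.
variable "master_server_group_policy" {
  type    = string
  default = ""

  validation {
    condition = contains(
      ["", "affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"],
      var.master_server_group_policy
    )
    error_message = "Must be empty or one of: affinity, anti-affinity, soft-affinity, soft-anti-affinity."
  }
}
```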
@@ -239,8 +239,19 @@ variable "use_access_ip" {
   default = 1
 }

-variable "use_server_groups" {
-  default = false
+variable "master_server_group_policy" {
+  description = "desired server group policy, e.g. anti-affinity"
+  default = ""
+}
+
+variable "node_server_group_policy" {
+  description = "desired server group policy, e.g. anti-affinity"
+  default = ""
+}
+
+variable "etcd_server_group_policy" {
+  description = "desired server group policy, e.g. anti-affinity"
+  default = ""
 }

 variable "router_id" {