[contrib] terraform openstack: allow disabling port security (#8410)

This commit is contained in:
Cristian Calin 2022-01-14 22:58:32 +02:00 committed by GitHub
parent 1a69f8c3ad
commit ea44d64511
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 52 additions and 27 deletions

View file

@ -283,6 +283,7 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
|`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) | |`node_server_group_policy` | Enable and use openstack nova servergroups for nodes with set policy, default: "" (disabled) |
|`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) | |`etcd_server_group_policy` | Enable and use openstack nova servergroups for etcd with set policy, default: "" (disabled) |
|`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. | |`use_access_ip` | If 1, nodes with floating IPs will transmit internal cluster traffic via floating IPs; if 0 private IPs will be used instead. Default value is 1. |
|`port_security_enabled` | Allows disabling port security by setting this to `false`. Defaults to `true`. |
|`k8s_nodes` | Map containing worker node definition, see explanation below | |`k8s_nodes` | Map containing worker node definition, see explanation below |
##### k8s_nodes ##### k8s_nodes
@ -411,7 +412,7 @@ plugins. This is accomplished as follows:
```ShellSession ```ShellSession
cd inventory/$CLUSTER cd inventory/$CLUSTER
terraform init ../../contrib/terraform/openstack terraform -chdir="../../contrib/terraform/openstack" init
``` ```
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules. This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
@ -443,7 +444,7 @@ You can apply the Terraform configuration to your cluster with the following com
issued from your cluster's inventory directory (`inventory/$CLUSTER`): issued from your cluster's inventory directory (`inventory/$CLUSTER`):
```ShellSession ```ShellSession
terraform apply -var-file=cluster.tfvars ../../contrib/terraform/openstack terraform -chdir="../../contrib/terraform/openstack" apply -var-file=cluster.tfvars
``` ```
if you chose to create a bastion host, this script will create if you chose to create a bastion host, this script will create
@ -458,7 +459,7 @@ pick it up automatically.
You can destroy your new cluster with the following command issued from the cluster's inventory directory: You can destroy your new cluster with the following command issued from the cluster's inventory directory:
```ShellSession ```ShellSession
terraform destroy -var-file=cluster.tfvars ../../contrib/terraform/openstack terraform -chdir="../../contrib/terraform/openstack" destroy -var-file=cluster.tfvars
``` ```
If you've started the Ansible run, it may also be a good idea to do some manual cleanup: If you've started the Ansible run, it may also be a good idea to do some manual cleanup:

View file

@ -1,14 +1,15 @@
module "network" { module "network" {
source = "./modules/network" source = "./modules/network"
external_net = var.external_net external_net = var.external_net
network_name = var.network_name network_name = var.network_name
subnet_cidr = var.subnet_cidr subnet_cidr = var.subnet_cidr
cluster_name = var.cluster_name cluster_name = var.cluster_name
dns_nameservers = var.dns_nameservers dns_nameservers = var.dns_nameservers
network_dns_domain = var.network_dns_domain network_dns_domain = var.network_dns_domain
use_neutron = var.use_neutron use_neutron = var.use_neutron
router_id = var.router_id port_security_enabled = var.port_security_enabled
router_id = var.router_id
} }
module "ips" { module "ips" {
@ -88,6 +89,7 @@ module "compute" {
extra_sec_groups = var.extra_sec_groups extra_sec_groups = var.extra_sec_groups
extra_sec_groups_name = var.extra_sec_groups_name extra_sec_groups_name = var.extra_sec_groups_name
group_vars_path = var.group_vars_path group_vars_path = var.group_vars_path
port_security_enabled = var.port_security_enabled
network_id = module.network.router_id network_id = module.network.router_id
} }

View file

@ -164,6 +164,15 @@ locals {
openstack_networking_secgroup_v2.worker.name, openstack_networking_secgroup_v2.worker.name,
var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "", var.extra_sec_groups ? openstack_networking_secgroup_v2.worker_extra[0].name : "",
]) ])
# bastion groups
# compact() drops empty strings; concat() around a single list literal was a
# no-op, so build the list directly (same style as the groups below).
bastion_sec_groups = compact([
  openstack_networking_secgroup_v2.k8s.name,
  openstack_networking_secgroup_v2.bastion[0].name,
])
# etcd groups
etcd_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name])
# glusterfs groups
gfs_sec_groups = compact([openstack_networking_secgroup_v2.k8s.name])
# Image uuid # Image uuid
image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id image_to_use_node = var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.vm_image[0].id
@ -197,9 +206,7 @@ resource "openstack_compute_instance_v2" "bastion" {
name = var.network_name name = var.network_name
} }
security_groups = [openstack_networking_secgroup_v2.k8s.name, security_groups = var.port_security_enabled ? local.bastion_sec_groups : null
element(openstack_networking_secgroup_v2.bastion.*.name, count.index),
]
metadata = { metadata = {
ssh_user = var.ssh_user ssh_user = var.ssh_user
@ -240,7 +247,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
name = var.network_name name = var.network_name
} }
security_groups = local.master_sec_groups security_groups = var.port_security_enabled ? local.master_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
@ -288,7 +295,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
name = var.network_name name = var.network_name
} }
security_groups = local.master_sec_groups security_groups = var.port_security_enabled ? local.master_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
@ -334,7 +341,7 @@ resource "openstack_compute_instance_v2" "etcd" {
name = var.network_name name = var.network_name
} }
security_groups = [openstack_networking_secgroup_v2.k8s.name] security_groups = var.port_security_enabled ? local.etcd_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.etcd_server_group_policy ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : [] for_each = var.etcd_server_group_policy ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
@ -376,7 +383,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
name = var.network_name name = var.network_name
} }
security_groups = local.master_sec_groups security_groups = var.port_security_enabled ? local.master_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
@ -419,7 +426,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
name = var.network_name name = var.network_name
} }
security_groups = local.master_sec_groups security_groups = var.port_security_enabled ? local.master_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : [] for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
@ -462,7 +469,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
name = var.network_name name = var.network_name
} }
security_groups = local.worker_sec_groups security_groups = var.port_security_enabled ? local.worker_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
@ -509,7 +516,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
name = var.network_name name = var.network_name
} }
security_groups = local.worker_sec_groups security_groups = var.port_security_enabled ? local.worker_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
@ -552,7 +559,7 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
name = var.network_name name = var.network_name
} }
security_groups = local.worker_sec_groups security_groups = var.port_security_enabled ? local.worker_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
@ -597,7 +604,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
name = var.network_name name = var.network_name
} }
security_groups = [openstack_networking_secgroup_v2.k8s.name] security_groups = var.port_security_enabled ? local.gfs_sec_groups : null
dynamic "scheduler_hints" { dynamic "scheduler_hints" {
for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : [] for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []

View file

@ -165,3 +165,7 @@ variable "image_master_uuid" {
variable "group_vars_path" { variable "group_vars_path" {
type = string type = string
} }
# When false, instances in this module are created with
# `security_groups = null` (see main.tf), i.e. no security groups attached.
variable "port_security_enabled" {
  description = "Enable port security on the internal network"
  type        = bool
}

View file

@ -11,10 +11,11 @@ data "openstack_networking_router_v2" "k8s" {
} }
resource "openstack_networking_network_v2" "k8s" { resource "openstack_networking_network_v2" "k8s" {
name = var.network_name name = var.network_name
count = var.use_neutron count = var.use_neutron
dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null dns_domain = var.network_dns_domain != null ? var.network_dns_domain : null
admin_state_up = "true" admin_state_up = "true"
port_security_enabled = var.port_security_enabled
} }
resource "openstack_networking_subnet_v2" "k8s" { resource "openstack_networking_subnet_v2" "k8s" {

View file

@ -10,6 +10,10 @@ variable "dns_nameservers" {
type = list type = list
} }
# Forwarded to openstack_networking_network_v2.k8s.port_security_enabled
# (see main.tf of this module).
variable "port_security_enabled" {
  description = "Enable port security on the internal network"
  type        = bool
}
variable "subnet_cidr" {} variable "subnet_cidr" {}
variable "use_neutron" {} variable "use_neutron" {}

View file

@ -148,6 +148,12 @@ variable "use_neutron" {
default = 1 default = 1
} }
# Passed through to both the network module (network-level port security)
# and the compute module (whether security groups are attached to instances).
variable "port_security_enabled" {
  description = "Enable port security on the internal network"
  type        = bool
  # Use a bare boolean literal: the previous `"true"` string only satisfied
  # `type = bool` through Terraform's implicit string-to-bool conversion.
  default     = true
}
variable "subnet_cidr" { variable "subnet_cidr" {
description = "Subnet CIDR block." description = "Subnet CIDR block."
type = string type = string