Make it possible to disable access_ip (openstack provider) (#5239)

* Add a variable to disable access_ip

* Document the use of use_access_ip
Hugo Blom 2019-10-07 13:09:09 +02:00 committed by Kubernetes Prow Robot
parent 0ba336b04e
commit a8c5a0afdc
7 changed files with 36 additions and 3 deletions

View file

@@ -426,7 +426,10 @@ resolvconf_mode: host_resolvconf
```
node_volume_attach_limit: 26
```
- Disable access_ip; this will cause all internal cluster traffic to be sent over the local network when a floating IP is attached (by default this value is set to 1)
```
use_access_ip: 0
```
### Deploy Kubernetes
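On the Terraform side this toggle is a variable of the same name, defaulting to 1 (see the variables.tf hunk further down). A minimal sketch of turning it off from a tfvars file, assuming the usual tfvars-driven workflow (the file name is illustrative):

```
# my-cluster.tfvars (illustrative file name)
# 0 disables access_ip, so internal cluster traffic stays on the local
# network even when floating IPs are attached.
use_access_ip = 0
```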

View file

@@ -70,6 +70,7 @@ module "compute" {
  supplementary_node_groups = "${var.supplementary_node_groups}"
  worker_allowed_ports = "${var.worker_allowed_ports}"
  wait_for_floatingip = "${var.wait_for_floatingip}"
  use_access_ip = "${var.use_access_ip}"
  network_id = "${module.network.router_id}"
}
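The hunk above is where the root configuration forwards the new variable into the compute module; a trimmed sketch of that wiring in the 0.11-era interpolation syntax used throughout this diff (the module source path and the elided arguments are placeholders):

```
variable "use_access_ip" {
  default = 1
}

module "compute" {
  source = "modules/compute" # placeholder path
  # ...existing arguments elided...
  use_access_ip = "${var.use_access_ip}"
}
```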

View file

@@ -114,6 +114,7 @@ resource "openstack_compute_instance_v2" "bastion" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "bastion"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -149,6 +150,7 @@ resource "openstack_compute_instance_v2" "bastion_custom_volume_size" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "bastion"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -176,6 +178,7 @@ resource "openstack_compute_instance_v2" "k8s_master" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -212,8 +215,9 @@ resource "openstack_compute_instance_v2" "k8s_master_custom_volume_size" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
    command = "sed s/USER/${var.ssh_user}/ ../../contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element( concat(var.bastion_fips, var.k8s_master_fips), 0)}/ > group_vars/no-floating.yml"
  }
@@ -239,6 +243,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -275,6 +280,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd_custom_volume_size"
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -300,6 +306,7 @@ resource "openstack_compute_instance_v2" "etcd" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -330,6 +337,7 @@ resource "openstack_compute_instance_v2" "etcd_custom_volume_size" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -353,6 +361,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -385,6 +394,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_custom_volum
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "etcd,kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -408,6 +418,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -440,6 +451,7 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd_cust
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-master,${var.supplementary_master_groups},k8s-cluster,vault,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -463,6 +475,7 @@ resource "openstack_compute_instance_v2" "k8s_node" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -499,6 +512,7 @@ resource "openstack_compute_instance_v2" "k8s_node_custom_volume_size" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
  provisioner "local-exec" {
@@ -526,6 +540,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -558,6 +573,7 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip_custom_volume_
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,no-floating,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -647,6 +663,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
@@ -677,6 +694,7 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip_custom_v
    ssh_user = "${var.ssh_user_gfs}"
    kubespray_groups = "gfs-cluster,network-storage,no-floating"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
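All of the hunks above add the same line to each instance's metadata block, which is what makes the value visible on the server and readable by the dynamic inventory later (the Python hunk below looks it up under host metadata). A trimmed sketch of the pattern, using the k8s_node resource and omitting everything except metadata:

```
resource "openstack_compute_instance_v2" "k8s_node" {
  # name, image, flavor, key pair, network, etc. omitted

  metadata = {
    ssh_user = "${var.ssh_user}"
    kubespray_groups = "kube-node,k8s-cluster,${var.supplementary_node_groups}"
    depends_on = "${var.network_id}"
    use_access_ip = "${var.use_access_ip}"
  }
}
```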

View file

@@ -105,3 +105,5 @@ variable "supplementary_node_groups" {
variable "worker_allowed_ports" {
  type = "list"
}

variable "use_access_ip" {}

View file

@@ -14,4 +14,4 @@ variable "network_name" {}
variable "router_id" {
  default = ""
}

View file

@@ -206,3 +206,7 @@ variable "worker_allowed_ports" {
    },
  ]
}

variable "use_access_ip" {
  default = 1
}

View file

@@ -339,14 +339,19 @@ def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list'''
    for host in hosts:
        host_id = host[1]['id']
        use_access_ip = host[1]['metadata']['use_access_ip']
        if host_id in ips:
            ip = ips[host_id]
            host[1].update({
                'access_ip_v4': ip,
                'access_ip': ip,
                'public_ipv4': ip,
                'ansible_ssh_host': ip,
            })
        if use_access_ip == "0":
            host[1].pop('access_ip')
        yield host
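A self-contained sketch of the resulting behaviour: the function body is reproduced from the hunk above, while the sample hosts and ips data is fabricated purely for illustration (the real inventory script builds them from the Terraform state):

```
def iter_host_ips(hosts, ips):
    '''Update hosts that have an entry in the floating IP list'''
    for host in hosts:
        host_id = host[1]['id']
        # Instance metadata written by Terraform; values arrive as strings.
        use_access_ip = host[1]['metadata']['use_access_ip']
        if host_id in ips:
            ip = ips[host_id]
            host[1].update({
                'access_ip_v4': ip,
                'access_ip': ip,
                'public_ipv4': ip,
                'ansible_ssh_host': ip,
            })
        # With the toggle off, drop access_ip so internal traffic is not
        # forced over the floating IP.
        if use_access_ip == "0":
            host[1].pop('access_ip')
        yield host


# Illustrative data only:
hosts = [('node-1', {'id': 'abc', 'metadata': {'use_access_ip': '0'}})]
ips = {'abc': '203.0.113.10'}
for name, attrs in iter_host_ips(hosts, ips):
    # ansible_ssh_host keeps the floating IP; access_ip has been removed.
    print(name, attrs['ansible_ssh_host'], 'access_ip' in attrs)
```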