contrib/terraform/exoscale: Rework SSH public keys (#7242)
* contrib/terraform/exoscale: Rework SSH public keys

  Exoscale has a few limitations with `exoscale_ssh_keypair` resources.
  Creating several clusters with these scripts may lead to an error like:

  ```
  Error: API error ParamError 431 (InvalidParameterValueException 4350): The key pair "lj-sc-ssh-key" already has this fingerprint
  ```

  This patch reworks the handling of SSH public keys. Specifically, we rely
  on the more cloud-agnostic way of configuring SSH public keys via
  `cloud-init`.

* contrib/terraform/exoscale: terraform fmt

* contrib/terraform/exoscale: Add terraform validate

* contrib/terraform/exoscale: Inline public SSH keys

  The Terraform scripts need to install some SSH key so that Kubespray
  (i.e., the "Ansible part") can take over. Initially, we pointed the
  Terraform scripts to `~/.ssh/id_rsa.pub`. This proved to be suboptimal:
  operators sharing responsibility for a cluster risk unnecessarily
  replacing resources. Therefore, it has been determined that it's best to
  inline the public SSH keys. The chosen variable, `ssh_public_keys`,
  provides some uniformity with `contrib/azurerm`.

* Fix Terraform Exoscale test

* Fix Terraform 0.14 test
parent 88bee6c68e
commit b77460ec34

11 changed files with 161 additions and 75 deletions
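Before the diff itself, here is a rough sketch of the cloud-agnostic pattern the commit message describes: pass the public keys in as a plain `list(string)` and render them into cloud-init user data, instead of creating an `exoscale_ssh_keypair` resource. This is an illustration only; apart from `ssh_public_keys`, the resource values and the template path below are assumptions, not code taken from this commit.

```hcl
# Sketch: keys travel through cloud-init user data, so no provider-specific
# key pair object (and no keypair fingerprint collision) is involved.
variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

# Hypothetical compute resource; every literal value here is a placeholder.
resource "exoscale_compute" "node" {
  display_name = "example-node"
  zone         = "ch-gva-2"
  size         = "Small"
  disk_size    = 50
  template_id  = "00000000-0000-0000-0000-000000000000"
  state        = "Running"

  # cloud-init reads ssh_authorized_keys from the rendered user data.
  user_data = templatefile("${path.module}/templates/cloud-init.tmpl", {
    ssh_public_keys = var.ssh_public_keys
  })
}
```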
@@ -95,6 +95,13 @@ tf-0.13.x-validate-aws:
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.13.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: 0.13.5
+    PROVIDER: exoscale
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 tf-0.14.x-validate-openstack:
   extends: .terraform_validate
   variables:
@@ -116,6 +123,13 @@ tf-0.14.x-validate-aws:
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.14.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: 0.14.3
+    PROVIDER: exoscale
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:
@@ -48,7 +48,7 @@ cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
 cd inventory/$CLUSTER
 ```
 
-Edit `default.tfvars` to match your setup
+Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
 
 ```bash
 # Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
@@ -122,7 +122,7 @@ terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
 
 ### Required
 
-* `ssh_pub_key`: Path to public ssh key to use for all machines
+* `ssh_public_keys`: List of public SSH keys to install on all machines
 * `zone`: The zone where to run the cluster
 * `machines`: Machines to provision. Key of this object will be used as the name of the machine
   * `node_type`: The role of this node *(master|worker)*
@@ -3,49 +3,53 @@ zone = "ch-gva-2"
 
 inventory_file = "inventory.ini"
 
-ssh_pub_key = "~/.ssh/id_rsa.pub"
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa I-did-not-read-the-docs",
+  "ssh-rsa I-did-not-read-the-docs 2",
+]
 
 machines = {
-  "master-0": {
-    "node_type": "master",
-    "size": "Small",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
-    }
-  },
-  "worker-0": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
-    }
-  },
-  "worker-1": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
-    }
-  },
-  "worker-2": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
-    }
-  }
+  "master-0" : {
+    "node_type" : "master",
+    "size" : "Small",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-0" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-1" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-2" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  }
 }
 
 nodeport_whitelist = [
@@ -7,7 +7,7 @@ module "kubernetes" {
 
   machines = var.machines
 
-  ssh_pub_key = var.ssh_pub_key
+  ssh_public_keys = var.ssh_public_keys
 
   ssh_whitelist        = var.ssh_whitelist
   api_server_whitelist = var.api_server_whitelist
@@ -23,18 +23,18 @@ data "template_file" "inventory" {
 
   vars = {
     connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
-        keys(module.kubernetes.master_ip_addresses),
-        values(module.kubernetes.master_ip_addresses).*.public_ip,
-        values(module.kubernetes.master_ip_addresses).*.private_ip,
-        range(1, length(module.kubernetes.master_ip_addresses) + 1)))
+      keys(module.kubernetes.master_ip_addresses),
+      values(module.kubernetes.master_ip_addresses).*.public_ip,
+      values(module.kubernetes.master_ip_addresses).*.private_ip,
+      range(1, length(module.kubernetes.master_ip_addresses) + 1)))
     connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
-        keys(module.kubernetes.worker_ip_addresses),
-        values(module.kubernetes.worker_ip_addresses).*.public_ip,
-        values(module.kubernetes.worker_ip_addresses).*.private_ip))
+      keys(module.kubernetes.worker_ip_addresses),
+      values(module.kubernetes.worker_ip_addresses).*.public_ip,
+      values(module.kubernetes.worker_ip_addresses).*.private_ip))
 
-    list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
-    list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
-    api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
+    list_master       = join("\n", keys(module.kubernetes.master_ip_addresses))
+    list_worker       = join("\n", keys(module.kubernetes.worker_ip_addresses))
+    api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
   }
 }
 
@@ -45,7 +45,6 @@ resource "exoscale_compute" "master" {
   template_id     = data.exoscale_compute_template.os_image[each.key].id
   size            = each.value.size
   disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  key_pair        = exoscale_ssh_keypair.ssh_key.name
   state           = "Running"
   zone            = var.zone
   security_groups = [exoscale_security_group.master_sg.name]
@@ -58,6 +57,7 @@ resource "exoscale_compute" "master" {
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size
       node_type           = "master"
+      ssh_public_keys     = var.ssh_public_keys
     }
   )
 }
@@ -73,7 +73,6 @@ resource "exoscale_compute" "worker" {
   template_id     = data.exoscale_compute_template.os_image[each.key].id
   size            = each.value.size
   disk_size       = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  key_pair        = exoscale_ssh_keypair.ssh_key.name
   state           = "Running"
   zone            = var.zone
   security_groups = [exoscale_security_group.worker_sg.name]
@@ -86,6 +85,7 @@ resource "exoscale_compute" "worker" {
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
      root_partition_size = each.value.boot_disk.root_partition_size
      node_type           = "worker"
+     ssh_public_keys     = var.ssh_public_keys
    }
  )
 }
@@ -191,8 +191,3 @@ resource "exoscale_secondary_ipaddress" "control_plane_lb" {
   compute_id = each.value.id
   ip_address = exoscale_ipaddress.control_plane_lb.ip_address
 }
-
-resource "exoscale_ssh_keypair" "ssh_key" {
-  name       = "${var.prefix}-ssh-key"
-  public_key = trimspace(file(pathexpand(var.ssh_pub_key)))
-}
@@ -13,6 +13,11 @@ bootcmd:
 %{ endif }
 %{ endif }
 
+ssh_authorized_keys:
+%{ for ssh_public_key in ssh_public_keys ~}
+  - ${ssh_public_key}
+%{ endfor ~}
+
 write_files:
   - path: /etc/netplan/eth1.yaml
     content: |
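The `%{ for }` block added above is ordinary Terraform template syntax: each element of `ssh_public_keys` becomes one entry under cloud-init's `ssh_authorized_keys`. A small self-contained sketch of the same directive follows; the local values and output are placeholders for illustration, not part of this commit.

```hcl
# Illustrative only: renders an ssh_authorized_keys block from a key list.
locals {
  ssh_public_keys = [
    "ssh-ed25519 AAAA...key-one",
    "ssh-rsa AAAA...key-two",
  ]

  # The "~" markers trim the whitespace left behind by the directive lines,
  # keeping the rendered YAML compact.
  ssh_authorized_keys_block = <<-EOT
    ssh_authorized_keys:
    %{ for ssh_public_key in local.ssh_public_keys ~}
      - ${ssh_public_key}
    %{ endfor ~}
  EOT
}

# One "- <key>" line is emitted per list element.
output "cloud_init_ssh_keys" {
  value = local.ssh_authorized_keys_block
}
```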
@@ -21,7 +21,9 @@ variable "machines" {
   }))
 }
 
-variable "ssh_pub_key" {}
+variable "ssh_public_keys" {
+  type = list(string)
+}
 
 variable "ssh_whitelist" {
   type = list(string)
contrib/terraform/exoscale/sample-inventory/cluster.tfvars (new file, 65 lines)
@@ -0,0 +1,65 @@
+prefix = "default"
+zone   = "ch-gva-2"
+
+inventory_file = "inventory.ini"
+
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa I-did-not-read-the-docs",
+  "ssh-rsa I-did-not-read-the-docs 2",
+]
+
+machines = {
+  "master-0" : {
+    "node_type" : "master",
+    "size" : "Small",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-0" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-1" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-2" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  }
+}
+
+nodeport_whitelist = [
+  "0.0.0.0/0"
+]
+
+ssh_whitelist = [
+  "0.0.0.0/0"
+]
+
+api_server_whitelist = [
+  "0.0.0.0/0"
+]
contrib/terraform/exoscale/sample-inventory/group_vars (new symbolic link)
@@ -0,0 +1 @@
+../../../../inventory/sample/group_vars
@@ -1,13 +1,13 @@
-variable zone {
+variable "zone" {
   description = "The zone where to run the cluster"
 }
 
-variable prefix {
+variable "prefix" {
   description = "Prefix for resource names"
   default     = "default"
 }
 
-variable machines {
+variable "machines" {
   description = "Cluster machines"
   type = map(object({
     node_type = string
@@ -21,24 +21,24 @@ variable machines {
   }))
 }
 
-variable ssh_pub_key {
-  description = "Path to public SSH key file which is injected into the VMs."
-  type        = string
+variable "ssh_public_keys" {
+  description = "List of public SSH keys which are injected into the VMs."
+  type        = list(string)
 }
 
-variable ssh_whitelist {
+variable "ssh_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for ssh"
-  type = list(string)
+  type        = list(string)
 }
 
-variable api_server_whitelist {
+variable "api_server_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
-  type = list(string)
+  type        = list(string)
 }
 
-variable nodeport_whitelist {
+variable "nodeport_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
-  type = list(string)
+  type        = list(string)
 }
 
 variable "inventory_file" {
@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
-      source = "exoscale/exoscale"
+      source  = "exoscale/exoscale"
       version = ">= 0.21"
     }
     null = {