contrib/terraform/exoscale: Rework SSH public keys (#7242)

* contrib/terraform/exoscale: Rework SSH public keys

Exoscale has a few limitations with `exoscale_ssh_keypair` resources.
Creating several clusters with these scripts may lead to an error like:

```
Error: API error ParamError 431 (InvalidParameterValueException 4350): The key pair "lj-sc-ssh-key" already has this fingerprint
```

This patch reworks handling of SSH public keys. Specifically, we rely on
the more cloud-agnostic way of configuring SSH public keys via
`cloud-init`.
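
In practice, the keys are rendered into each instance's `user_data` rather than registered as a provider-side keypair object. A minimal sketch of the wiring (the resource label `node` and the elided attributes are illustrative, not the exact code of this patch):

```hcl
# Sketch only: pass the configured keys into cloud-init via user data.
variable "ssh_public_keys" {
  description = "Public SSH keys to install on all machines"
  type        = list(string)
}

# "node" is a hypothetical label; the real module defines master/worker resources.
resource "exoscale_compute" "node" {
  # ... template_id, size, zone, security_groups, etc. ...

  # No exoscale_ssh_keypair is created; cloud-init installs the keys on first boot.
  user_data = templatefile("${path.module}/templates/cloud-init.tmpl", {
    ssh_public_keys = var.ssh_public_keys
  })
}
```

Since no `exoscale_ssh_keypair` object exists on the provider side, the fingerprint collision above can no longer occur.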

* contrib/terraform/exoscale: terraform fmt

* contrib/terraform/exoscale: Add terraform validate

* contrib/terraform/exoscale: Inline public SSH keys

The Terraform scripts need to install an SSH key, so that Kubespray
(i.e., the "Ansible part") can take over. Initially, we pointed the
Terraform scripts to `~/.ssh/id_rsa.pub`. This proved to be suboptimal:
operators sharing responsibility for a cluster risk unnecessarily replacing resources.

Therefore, it has been determined that it's best to inline the public
SSH keys. The chosen variable `ssh_public_keys` provides some uniformity
with `contrib/azurerm`.
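
For illustration, a filled-in `ssh_public_keys` entry in `default.tfvars` could look like this (both keys below are placeholders):

```hcl
ssh_public_keys = [
  # One entry per operator; paste each key verbatim, e.g. from `cat ~/.ssh/id_ed25519.pub`.
  "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... alice@laptop",
  "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB... bob@workstation",
]
```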

* Fix Terraform Exoscale test

* Fix Terraform 0.14 test
Cristian Klein 2021-02-03 16:32:28 +01:00 committed by GitHub
parent 88bee6c68e
commit b77460ec34
11 changed files with 161 additions and 75 deletions


@@ -95,6 +95,13 @@ tf-0.13.x-validate-aws:
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.13.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: 0.13.5
+    PROVIDER: exoscale
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 tf-0.14.x-validate-openstack:
   extends: .terraform_validate
   variables:
@@ -116,6 +123,13 @@ tf-0.14.x-validate-aws:
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
+tf-0.14.x-validate-exoscale:
+  extends: .terraform_validate
+  variables:
+    TF_VERSION: 0.14.3
+    PROVIDER: exoscale
+    CLUSTER: $CI_COMMIT_REF_NAME
+
 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:


@@ -48,7 +48,7 @@ cp contrib/terraform/exoscale/default.tfvars inventory/$CLUSTER/
 cd inventory/$CLUSTER
 ```
 
-Edit `default.tfvars` to match your setup
+Edit `default.tfvars` to match your setup. You MUST, at the very least, change `ssh_public_keys`.
 
 ```bash
 # Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
@@ -122,7 +122,7 @@ terraform destroy -var-file default.tfvars ../../contrib/terraform/exoscale
 
 ### Required
 
-* `ssh_pub_key`: Path to public ssh key to use for all machines
+* `ssh_public_keys`: List of public SSH keys to install on all machines
 * `zone`: The zone where to run the cluster
 * `machines`: Machines to provision. Key of this object will be used as the name of the machine
   * `node_type`: The role of this node *(master|worker)*


@@ -3,49 +3,53 @@ zone = "ch-gva-2"
 
 inventory_file = "inventory.ini"
 
-ssh_pub_key = "~/.ssh/id_rsa.pub"
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa I-did-not-read-the-docs",
+  "ssh-rsa I-did-not-read-the-docs 2",
+]
 
 machines = {
-  "master-0": {
-    "node_type": "master",
-    "size": "Small",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
+  "master-0" : {
+    "node_type" : "master",
+    "size" : "Small",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
     }
   },
-  "worker-0": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
+  "worker-0" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
     }
   },
-  "worker-1": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
+  "worker-1" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
     }
   },
-  "worker-2": {
-    "node_type": "worker",
-    "size": "Large",
-    "boot_disk": {
-      "image_name": "Linux Ubuntu 20.04 LTS 64-bit",
-      "root_partition_size": 50,
-      "node_local_partition_size": 0,
-      "ceph_partition_size": 0
+  "worker-2" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
     }
   }
 }
 
 nodeport_whitelist = [


@@ -7,7 +7,7 @@ module "kubernetes" {
 
   machines = var.machines
 
-  ssh_pub_key = var.ssh_pub_key
+  ssh_public_keys = var.ssh_public_keys
   ssh_whitelist = var.ssh_whitelist
 
   api_server_whitelist = var.api_server_whitelist
@@ -23,18 +23,18 @@ data "template_file" "inventory" {
   vars = {
     connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s etcd_member_name=etcd%d",
       keys(module.kubernetes.master_ip_addresses),
      values(module.kubernetes.master_ip_addresses).*.public_ip,
       values(module.kubernetes.master_ip_addresses).*.private_ip,
       range(1, length(module.kubernetes.master_ip_addresses) + 1)))
     connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s ip=%s",
       keys(module.kubernetes.worker_ip_addresses),
       values(module.kubernetes.worker_ip_addresses).*.public_ip,
       values(module.kubernetes.worker_ip_addresses).*.private_ip))
     list_master = join("\n", keys(module.kubernetes.master_ip_addresses))
     list_worker = join("\n", keys(module.kubernetes.worker_ip_addresses))
     api_lb_ip_address = module.kubernetes.control_plane_lb_ip_address
   }
 }


@@ -45,7 +45,6 @@ resource "exoscale_compute" "master" {
   template_id = data.exoscale_compute_template.os_image[each.key].id
   size = each.value.size
   disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  key_pair = exoscale_ssh_keypair.ssh_key.name
   state = "Running"
   zone = var.zone
   security_groups = [exoscale_security_group.master_sg.name]
@@ -58,6 +57,7 @@ resource "exoscale_compute" "master" {
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size
       node_type = "master"
+      ssh_public_keys = var.ssh_public_keys
     }
   )
 }
@@ -73,7 +73,6 @@ resource "exoscale_compute" "worker" {
   template_id = data.exoscale_compute_template.os_image[each.key].id
   size = each.value.size
   disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
-  key_pair = exoscale_ssh_keypair.ssh_key.name
   state = "Running"
   zone = var.zone
   security_groups = [exoscale_security_group.worker_sg.name]
@@ -86,6 +85,7 @@ resource "exoscale_compute" "worker" {
       ceph_partition_size = each.value.boot_disk.ceph_partition_size
       root_partition_size = each.value.boot_disk.root_partition_size
       node_type = "worker"
+      ssh_public_keys = var.ssh_public_keys
     }
   )
 }
@@ -191,8 +191,3 @@ resource "exoscale_secondary_ipaddress" "control_plane_lb" {
   compute_id = each.value.id
   ip_address = exoscale_ipaddress.control_plane_lb.ip_address
 }
-
-resource "exoscale_ssh_keypair" "ssh_key" {
-  name = "${var.prefix}-ssh-key"
-  public_key = trimspace(file(pathexpand(var.ssh_pub_key)))
-}


@@ -13,6 +13,11 @@ bootcmd:
 %{ endif }
 %{ endif }
 
+ssh_authorized_keys:
+%{ for ssh_public_key in ssh_public_keys ~}
+  - ${ssh_public_key}
+%{ endfor ~}
+
 write_files:
   - path: /etc/netplan/eth1.yaml
     content: |
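
With two keys configured, the loop above renders a cloud-init fragment roughly like the following (abbreviated placeholder keys):

```yaml
ssh_authorized_keys:
  - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... alice@laptop
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB... bob@workstation
```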


@@ -21,7 +21,9 @@ variable "machines" {
   }))
 }
 
-variable "ssh_pub_key" {}
+variable "ssh_public_keys" {
+  type = list(string)
+}
 
 variable "ssh_whitelist" {
   type = list(string)


@@ -0,0 +1,65 @@
+prefix = "default"
+zone = "ch-gva-2"
+
+inventory_file = "inventory.ini"
+
+ssh_public_keys = [
+  # Put your public SSH key here
+  "ssh-rsa I-did-not-read-the-docs",
+  "ssh-rsa I-did-not-read-the-docs 2",
+]
+
+machines = {
+  "master-0" : {
+    "node_type" : "master",
+    "size" : "Small",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-0" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-1" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  },
+  "worker-2" : {
+    "node_type" : "worker",
+    "size" : "Large",
+    "boot_disk" : {
+      "image_name" : "Linux Ubuntu 20.04 LTS 64-bit",
+      "root_partition_size" : 50,
+      "node_local_partition_size" : 0,
+      "ceph_partition_size" : 0
+    }
+  }
+}
+
+nodeport_whitelist = [
+  "0.0.0.0/0"
+]
+
+ssh_whitelist = [
+  "0.0.0.0/0"
+]
+
+api_server_whitelist = [
+  "0.0.0.0/0"
+]


@@ -0,0 +1 @@
+../../../../inventory/sample/group_vars


@@ -1,13 +1,13 @@
-variable zone {
+variable "zone" {
   description = "The zone where to run the cluster"
 }
 
-variable prefix {
+variable "prefix" {
   description = "Prefix for resource names"
   default = "default"
 }
 
-variable machines {
+variable "machines" {
   description = "Cluster machines"
   type = map(object({
     node_type = string
@@ -21,24 +21,24 @@ variable machines {
   }))
 }
 
-variable ssh_pub_key {
-  description = "Path to public SSH key file which is injected into the VMs."
-  type = string
+variable "ssh_public_keys" {
+  description = "List of public SSH keys which are injected into the VMs."
+  type = list(string)
 }
 
-variable ssh_whitelist {
+variable "ssh_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for ssh"
   type = list(string)
 }
 
-variable api_server_whitelist {
+variable "api_server_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for kubernetes api server"
   type = list(string)
 }
 
-variable nodeport_whitelist {
+variable "nodeport_whitelist" {
   description = "List of IP ranges (CIDR) to whitelist for kubernetes nodeports"
   type = list(string)
 }
 
 variable "inventory_file" {


@@ -1,7 +1,7 @@
 terraform {
   required_providers {
     exoscale = {
-      source = "exoscale/exoscale"
+      source  = "exoscale/exoscale"
       version = ">= 0.21"
     }
     null = {