* Add terraform scripts for vSphere

* Fixup: Add terraform scripts for vSphere

* Add inventory generation

* Use machines var to provide IPs

* Add README file

* Add default.tfvars file

* Fix newlines at the end of files

* Remove master.count and worker.count variables

* Fixup cloud-init formatting

* Fixes after initial review

* Add warning about disabled DHCP

* Fixes after second review

* Add sample-inventory
Jakub Krzywda 2021-02-26 13:20:15 +01:00 committed by GitHub
parent 100d9333ca
commit 0a0156c946
15 changed files with 670 additions and 0 deletions


@@ -100,6 +100,12 @@ tf-0.13.x-validate-exoscale:
  variables:
    TF_VERSION: 0.13.5
    PROVIDER: exoscale

tf-0.13.x-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.13.5
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

tf-0.14.x-validate-openstack:
@@ -128,6 +134,12 @@ tf-0.14.x-validate-exoscale:
  variables:
    TF_VERSION: 0.14.3
    PROVIDER: exoscale

tf-0.14.x-validate-vsphere:
  extends: .terraform_validate
  variables:
    TF_VERSION: 0.14.3
    PROVIDER: vsphere
    CLUSTER: $CI_COMMIT_REF_NAME

# tf-packet-ubuntu16-default:


@@ -0,0 +1,117 @@
# Kubernetes on vSphere with Terraform
Provision a Kubernetes cluster on [vSphere](https://www.vmware.com/se/products/vsphere.html) using Terraform and Kubespray.
## Overview
The setup looks like the following:
```text
   Kubernetes cluster
+-----------------------+
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | | Master/etcd  |  |
|   | | node(s)      |  |
|   +-+              |  |
|     +--------------+  |
|           ^           |
|           |           |
|           v           |
|   +--------------+    |
|   | +--------------+  |
|   | |              |  |
|   | |    Worker    |  |
|   | |   node(s)    |  |
|   +-+              |  |
|     +--------------+  |
+-----------------------+
```
## Warning
This setup assumes that DHCP is disabled in the vSphere cluster, so static IP addresses have to be provided in the configuration file.
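Because of that, every entry in the `machines` variable needs a static address. A minimal sketch with illustrative values only:

```hcl
machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "192.168.0.10/24" # static address in CIDR notation; adjust to your network
  }
}
```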
## Requirements
* Terraform 0.13.0 or newer
*0.12 also works if you modify the provider block to include the version and remove all `versions.tf` files (see the sketch below)*
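As a rough sketch, a 0.12-style provider block with the version pinned inline might look like the following; this is illustrative and not part of the repository:

```hcl
provider "vsphere" {
  # Terraform 0.12 predates provider source addresses, so the version
  # constraint lives in the provider block instead of versions.tf
  version              = ">= 1.24.3"
  user                 = var.vsphere_user
  password             = var.vsphere_password
  vsphere_server       = var.vsphere_server
  allow_unverified_ssl = true
}
```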
## Quickstart
NOTE: *Assumes you are at the root of the kubespray repo*
Copy the sample inventory for your cluster and the default Terraform variables file.
```bash
CLUSTER=my-vsphere-cluster
cp -r inventory/sample inventory/$CLUSTER
cp contrib/terraform/vsphere/default.tfvars inventory/$CLUSTER/
cd inventory/$CLUSTER
```
Edit `default.tfvars` to match your setup. You MUST set values specific to your network and vSphere cluster.
```bash
# Ensure $EDITOR points to your favorite editor, e.g., vim, emacs, VS Code, etc.
$EDITOR default.tfvars
```
For authentication to your vSphere cluster you can use environment variables:
```bash
export TF_VAR_vsphere_user=username
export TF_VAR_vsphere_password=password
```
Run Terraform to create the infrastructure.
```bash
terraform init ../../contrib/terraform/vsphere
terraform apply \
-var-file default.tfvars \
-state=tfstate-$CLUSTER.tfstate \
../../contrib/terraform/vsphere
```
You should now have an inventory file named `inventory.ini` that you can use with kubespray.
You can run `terraform output` to find out the IP addresses of the nodes.
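For example, to print the master addresses (a sketch assuming the state file name used above):

```bash
terraform output -state=tfstate-$CLUSTER.tfstate master_ip_addresses
```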
It is a good idea to check that you have basic SSH connectivity to the nodes:
```bash
ansible -i inventory.ini -m ping all
```
Example of using this with the default sample inventory:
```bash
ansible-playbook -i inventory.ini ../../cluster.yml -b -v
```
## Variables
### Required
* `machines`: Machines to provision. The key of each entry is used as the machine's name
  * `node_type`: The role of this node *(master|worker)*
  * `ip`: The IP address with the netmask (CIDR notation)
* `gateway`: The IP address of the network gateway
* `ssh_public_keys`: List of public SSH keys to install on all machines
* `vsphere_datacenter`: The identifier of the vSphere data center
* `vsphere_compute_cluster`: The identifier of the vSphere compute cluster
* `vsphere_datastore`: The identifier of the vSphere data store
* `vsphere_server`: The address of the vSphere server
* `vsphere_hostname`: The IP address of the vSphere host
* `template_name`: The name of the base image (the image has to be uploaded to vSphere beforehand)
### Optional
* `prefix`: Prefix to use for all resources; must be unique for all clusters in the same project *(defaults to `default`)*
* `dns_primary`: The IP address of the primary DNS server *(defaults to `8.8.4.4`)*
* `dns_secondary`: The IP address of the secondary DNS server *(defaults to `8.8.8.8`)*

An example variables file can be found in `default.tfvars`.


@@ -0,0 +1,34 @@
prefix = "default"
inventory_file = "inventory.ini"
machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]
vsphere_datacenter = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname = "i-did-not-read-the-docs" # e.g. 192.168.0.2
template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg


@@ -0,0 +1,116 @@
provider "vsphere" {
# Username and password set through env vars VSPHERE_USER and VSPHERE_PASSWORD
user = var.vsphere_user
password = var.vsphere_password
vsphere_server = var.vsphere_server
# If you have a self-signed cert
allow_unverified_ssl = true
}
data "vsphere_datacenter" "dc" {
name = var.vsphere_datacenter
}
data "vsphere_datastore" "datastore" {
name = var.vsphere_datastore
datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_network" "network" {
name = "VM Network"
datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_host" "host" {
name = var.vsphere_hostname
datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_virtual_machine" "template" {
name = var.template_name
datacenter_id = data.vsphere_datacenter.dc.id
}
data "vsphere_compute_cluster" "compute_cluster" {
name = var.vsphere_compute_cluster
datacenter_id = data.vsphere_datacenter.dc.id
}
resource "vsphere_resource_pool" "pool" {
name = "${var.prefix}-cluster-pool"
parent_resource_pool_id = data.vsphere_host.host.resource_pool_id
}
module "kubernetes" {
source = "./modules/kubernetes-cluster"
prefix = var.prefix
machines = var.machines
## Master ##
master_cores = var.master_cores
master_memory = var.master_memory
master_disk_size = var.master_disk_size
## Worker ##
worker_cores = var.worker_cores
worker_memory = var.worker_memory
worker_disk_size = var.worker_disk_size
## Global ##
gateway = var.gateway
dns_primary = var.dns_primary
dns_secondary = var.dns_secondary
pool_id = vsphere_resource_pool.pool.id
datastore_id = data.vsphere_datastore.datastore.id
folder = ""
guest_id = data.vsphere_virtual_machine.template.guest_id
scsi_type = data.vsphere_virtual_machine.template.scsi_type
network_id = data.vsphere_network.network.id
adapter_type = data.vsphere_virtual_machine.template.network_interface_types[0]
firmware = var.firmware
hardware_version = var.hardware_version
disk_thin_provisioned = data.vsphere_virtual_machine.template.disks.0.thin_provisioned
template_id = data.vsphere_virtual_machine.template.id
ssh_public_keys = var.ssh_public_keys
}
#
# Generate ansible inventory
#
data "template_file" "inventory" {
template = file("${path.module}/templates/inventory.tpl")
vars = {
connection_strings_master = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s etcd_member_name=etcd%d",
keys(module.kubernetes.master_ip),
values(module.kubernetes.master_ip),
range(1, length(module.kubernetes.master_ip) + 1)))
connection_strings_worker = join("\n", formatlist("%s ansible_user=ubuntu ansible_host=%s",
keys(module.kubernetes.worker_ip),
values(module.kubernetes.worker_ip)))
list_master = join("\n", formatlist("%s",
keys(module.kubernetes.master_ip)))
list_worker = join("\n", formatlist("%s",
keys(module.kubernetes.worker_ip)))
}
}
resource "null_resource" "inventories" {
provisioner "local-exec" {
command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
}
triggers = {
template = data.template_file.inventory.rendered
}
}


@@ -0,0 +1,109 @@
resource "vsphere_virtual_machine" "worker" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "worker"
}
name = each.key
resource_pool_id = var.pool_id
datastore_id = var.datastore_id
num_cpus = var.worker_cores
memory = var.worker_memory
memory_reservation = var.worker_memory
guest_id = var.guest_id
enable_disk_uuid = "true"
scsi_type = var.scsi_type
folder = var.folder
firmware = var.firmware
hardware_version = var.hardware_version
wait_for_guest_net_routable = false
network_interface {
network_id = var.network_id
adapter_type = var.adapter_type
}
disk {
label = "disk0"
size = var.worker_disk_size
thin_provisioned = var.disk_thin_provisioned
}
lifecycle {
ignore_changes = [disk]
}
clone {
template_uuid = var.template_id
}
cdrom {
client_device = true
}
vapp {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys}))
}
}
}
resource "vsphere_virtual_machine" "master" {
for_each = {
for name, machine in var.machines :
name => machine
if machine.node_type == "master"
}
name = each.key
resource_pool_id = var.pool_id
datastore_id = var.datastore_id
num_cpus = var.master_cores
memory = var.master_memory
memory_reservation = var.master_memory
guest_id = var.guest_id
enable_disk_uuid = "true"
scsi_type = var.scsi_type
folder = var.folder
firmware = var.firmware
hardware_version = var.hardware_version
network_interface {
network_id = var.network_id
adapter_type = var.adapter_type
}
disk {
label = "disk0"
size = var.master_disk_size
thin_provisioned = var.disk_thin_provisioned
}
lifecycle {
ignore_changes = [disk]
}
clone {
template_uuid = var.template_id
}
cdrom {
client_device = true
}
vapp {
properties = {
"user-data" = base64encode(templatefile("${path.module}/templates/cloud-init.tmpl", { ip = each.value.ip,
gw = var.gateway,
dns = var.dns_primary,
ssh_public_keys = var.ssh_public_keys}))
}
}
}


@@ -0,0 +1,13 @@
output "master_ip" {
value = {
for instance in vsphere_virtual_machine.master :
instance.name => instance.default_ip_address
}
}
output "worker_ip" {
value = {
for instance in vsphere_virtual_machine.worker :
instance.name => instance.default_ip_address
}
}


@@ -0,0 +1,35 @@
#cloud-config
ssh_authorized_keys:
%{ for ssh_public_key in ssh_public_keys ~}
  - ${ssh_public_key}
%{ endfor ~}

write_files:
  - path: /etc/netplan/20-internal-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          "lo:0":
            match:
              name: lo
            dhcp4: false
            addresses:
              - 172.17.0.100/32

  - path: /etc/netplan/10-user-network.yaml
    content: |
      network:
        version: 2
        ethernets:
          ens192:
            dhcp4: false # true to use dhcp
            addresses:
              - ${ip}
            gateway4: ${gw} # Set gateway here
            nameservers:
              addresses:
                - ${dns} # Set DNS IP address here

runcmd:
  - netplan apply


@@ -0,0 +1,38 @@
## Global ##

variable "prefix" {}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip        = string
  }))
}

variable "gateway" {}

variable "dns_primary" {}

variable "dns_secondary" {}

variable "pool_id" {}

variable "datastore_id" {}

variable "guest_id" {}

variable "scsi_type" {}

variable "network_id" {}

variable "adapter_type" {}

variable "disk_thin_provisioned" {}

variable "template_id" {}

variable "firmware" {}

variable "folder" {}

variable "ssh_public_keys" {
  type = list(string)
}

variable "hardware_version" {}

## Master ##

variable "master_cores" {}

variable "master_memory" {}

variable "master_disk_size" {}

## Worker ##

variable "worker_cores" {}

variable "worker_memory" {}

variable "worker_disk_size" {}


@@ -0,0 +1,9 @@
terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
  }
  required_version = ">= 0.13"
}


@@ -0,0 +1,31 @@
output "master_ip_addresses" {
value = module.kubernetes.master_ip
}
output "worker_ip_addresses" {
value = module.kubernetes.worker_ip
}
output "vsphere_datacenter" {
value = var.vsphere_datacenter
}
output "vsphere_server" {
value = var.vsphere_server
}
output "vsphere_datastore" {
value = var.vsphere_datastore
}
output "vsphere_network" {
value = var.network
}
output "vsphere_folder" {
value = terraform.workspace
}
output "vsphere_pool" {
value = "${terraform.workspace}-cluster-pool"
}


@@ -0,0 +1,34 @@
prefix = "default"
inventory_file = "inventory.ini"
machines = {
  "master-0" : {
    "node_type" : "master",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-0" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  },
  "worker-1" : {
    "node_type" : "worker",
    "ip" : "i-did-not-read-the-docs" # e.g. 192.168.0.2/24
  }
}

gateway = "i-did-not-read-the-docs" # e.g. 192.168.0.2

ssh_public_keys = [
  # Put your public SSH key here
  "ssh-rsa I-did-not-read-the-docs",
  "ssh-rsa I-did-not-read-the-docs 2",
]
vsphere_datacenter = "i-did-not-read-the-docs"
vsphere_compute_cluster = "i-did-not-read-the-docs" # e.g. Cluster
vsphere_datastore = "i-did-not-read-the-docs" # e.g. ssd-000000
vsphere_server = "i-did-not-read-the-docs" # e.g. vsphere.server.com
vsphere_hostname = "i-did-not-read-the-docs" # e.g. 192.168.0.2
template_name = "i-did-not-read-the-docs" # e.g. ubuntu-bionic-18.04-cloudimg


@@ -0,0 +1 @@
../../../../inventory/sample/group_vars


@@ -0,0 +1,17 @@
[all]
${connection_strings_master}
${connection_strings_worker}

[kube-master]
${list_master}

[etcd]
${list_master}

[kube-node]
${list_worker}

[k8s-cluster:children]
kube-master
kube-node


@@ -0,0 +1,89 @@
## Global ##

variable "prefix" {
  default = ""
}

variable "machines" {
  description = "Cluster machines"
  type = map(object({
    node_type = string
    ip        = string
  }))
}

variable "inventory_file" {
  default = "inventory.ini"
}

variable "network" {
  default = "VM Network"
}

variable "gateway" {}

variable "dns_primary" {
  default = "8.8.4.4"
}

variable "dns_secondary" {
  default = "8.8.8.8"
}

variable "vsphere_datacenter" {}

variable "vsphere_compute_cluster" {}

variable "vsphere_datastore" {}

variable "vsphere_user" {}

variable "vsphere_password" {}

variable "vsphere_server" {}

variable "vsphere_hostname" {}

variable "firmware" {
  default = "bios"
}

variable "hardware_version" {
  default = "15"
}

variable "template_name" {
  default = "ubuntu-focal-20.04-cloudimg"
}

variable "ssh_public_keys" {
  description = "List of public SSH keys which are injected into the VMs."
  type        = list(string)
}

## Master ##

variable "master_cores" {
  default = 4
}

variable "master_memory" {
  default = 4096
}

variable "master_disk_size" {
  default = "20"
}

## Worker ##

variable "worker_cores" {
  default = 16
}

variable "worker_memory" {
  default = 8192
}

variable "worker_disk_size" {
  default = "100"
}


@@ -0,0 +1,15 @@
terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      version = ">= 1.24.3"
    }
    null = {
      source = "hashicorp/null"
    }
    template = {
      source = "hashicorp/template"
    }
  }
  required_version = ">= 0.13"
}