mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2025-12-14 13:54:37 +03:00
contrib/terraform/exoscale: Rework SSH public keys (#7242)
* contrib/terraform/exoscale: Rework SSH public keys Exoscale has a few limitations with `exoscale_ssh_keypair` resources. Creating several clusters with these scripts may lead to an error like: ``` Error: API error ParamError 431 (InvalidParameterValueException 4350): The key pair "lj-sc-ssh-key" already has this fingerprint ``` This patch reworks handling of SSH public keys. Specifically, we rely on the more cloud-agnostic way of configuring SSH public keys via `cloud-init`. * contrib/terraform/exoscale: terraform fmt * contrib/terraform/exoscale: Add terraform validate * contrib/terraform/exoscale: Inline public SSH keys The Terraform scripts need to install some SSH key, so that Kubespray (i.e., the "Ansible part") can take over. Initially, we pointed the Terraform scripts to `~/.ssh/id_rsa.pub`. This proved to be suboptimal: Operators sharing responsibility for a cluster risk unnecessarily replacing resources. Therefore, it has been determined that it's best to inline the public SSH keys. The chosen variable `ssh_public_keys` provides some uniformity with `contrib/azurerm`. * Fix Terraform Exoscale test * Fix Terraform 0.14 test
This commit is contained in:
@@ -45,7 +45,6 @@ resource "exoscale_compute" "master" {
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
key_pair = exoscale_ssh_keypair.ssh_key.name
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.master_sg.name]
|
||||
@@ -58,6 +57,7 @@ resource "exoscale_compute" "master" {
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
node_type = "master"
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
}
|
||||
@@ -73,7 +73,6 @@ resource "exoscale_compute" "worker" {
|
||||
template_id = data.exoscale_compute_template.os_image[each.key].id
|
||||
size = each.value.size
|
||||
disk_size = each.value.boot_disk.root_partition_size + each.value.boot_disk.node_local_partition_size + each.value.boot_disk.ceph_partition_size
|
||||
key_pair = exoscale_ssh_keypair.ssh_key.name
|
||||
state = "Running"
|
||||
zone = var.zone
|
||||
security_groups = [exoscale_security_group.worker_sg.name]
|
||||
@@ -86,6 +85,7 @@ resource "exoscale_compute" "worker" {
|
||||
ceph_partition_size = each.value.boot_disk.ceph_partition_size
|
||||
root_partition_size = each.value.boot_disk.root_partition_size
|
||||
node_type = "worker"
|
||||
ssh_public_keys = var.ssh_public_keys
|
||||
}
|
||||
)
|
||||
}
|
||||
@@ -191,8 +191,3 @@ resource "exoscale_secondary_ipaddress" "control_plane_lb" {
|
||||
compute_id = each.value.id
|
||||
ip_address = exoscale_ipaddress.control_plane_lb.ip_address
|
||||
}
|
||||
|
||||
# Uploads a single SSH key pair to Exoscale, named per-cluster via var.prefix.
# NOTE(review): the surrounding diff hunk (@@ -191,8 +191,3) shows this
# resource being removed by this commit in favour of cloud-init
# `ssh_authorized_keys`, because Exoscale rejects registering the same key
# fingerprint twice across clusters — confirm against the full diff.
resource "exoscale_ssh_keypair" "ssh_key" {
  name = "${var.prefix}-ssh-key"
  # Reads the local public-key file (path may use `~`, hence pathexpand);
  # trimspace drops the trailing newline that key files typically carry.
  public_key = trimspace(file(pathexpand(var.ssh_pub_key)))
}
|
||||
|
||||
@@ -13,6 +13,11 @@ bootcmd:
|
||||
%{ endif }
|
||||
%{ endif }
|
||||
|
||||
ssh_authorized_keys:
|
||||
%{ for ssh_public_key in ssh_public_keys ~}
|
||||
- ${ssh_public_key}
|
||||
%{ endfor ~}
|
||||
|
||||
write_files:
|
||||
- path: /etc/netplan/eth1.yaml
|
||||
content: |
|
||||
|
||||
@@ -21,7 +21,9 @@ variable "machines" {
|
||||
}))
|
||||
}
|
||||
|
||||
# Path to a local SSH public-key file, read by the exoscale_ssh_keypair
# resource (file/pathexpand/trimspace). NOTE(review): the commit message says
# inlined `ssh_public_keys` supersedes this file-path approach — confirm
# whether this variable is still required before removing it.
variable "ssh_pub_key" {}
# SSH public keys given inline (key material, not file paths); the cloud-init
# template iterates over this list to populate `ssh_authorized_keys` on each
# node. Per the commit message, the name mirrors `ssh_public_keys` in
# contrib/azurerm for uniformity.
variable "ssh_public_keys" {
  type = list(string)
}
|
||||
|
||||
variable "ssh_whitelist" {
|
||||
type = list(string)
|
||||
|
||||
Reference in New Issue
Block a user