diff --git a/contrib/terraform/openstack/README.md b/contrib/terraform/openstack/README.md
index 62f57db3bccede444276483d59ae0f783eb2bf06..174d66e5a7618b32788fdbf44a5f45f4b706344c 100644
--- a/contrib/terraform/openstack/README.md
+++ b/contrib/terraform/openstack/README.md
@@ -17,7 +17,7 @@ most modern installs of OpenStack that support the basic services.
 - [ELASTX](https://elastx.se/)
 - [EnterCloudSuite](https://www.entercloudsuite.com/)
 - [FugaCloud](https://fuga.cloud/)
-- [Open Telekom Cloud](https://cloud.telekom.de/) : requires to set the variable `wait_for_floatingip = "true"` in your cluster.tfvars
+- [Open Telekom Cloud](https://cloud.telekom.de/)
 - [OVH](https://www.ovh.com/)
 - [Rackspace](https://www.rackspace.com/)
 - [Ultimum](https://ultimum.io/)
@@ -271,7 +271,6 @@ For your cluster, edit `inventory/$CLUSTER/cluster.tfvars`.
 |`k8s_allowed_remote_ips` | List of CIDR allowed to initiate a SSH connection, empty by default |
 |`worker_allowed_ports` | List of ports to open on worker nodes, `[{ "protocol" = "tcp", "port_range_min" = 30000, "port_range_max" = 32767, "remote_ip_prefix" = "0.0.0.0/0"}]` by default |
 |`master_allowed_ports` | List of ports to open on master nodes, expected format is `[{ "protocol" = "tcp", "port_range_min" = 443, "port_range_max" = 443, "remote_ip_prefix" = "0.0.0.0/0"}]`, empty by default |
-|`wait_for_floatingip` | Let Terraform poll the instance until the floating IP has been associated, `false` by default. |
 |`node_root_volume_size_in_gb` | Size of the root volume for nodes, 0 to use ephemeral storage |
 |`master_root_volume_size_in_gb` | Size of the root volume for masters, 0 to use ephemeral storage |
 |`master_volume_type` | Volume type of the root volume for control_plane, 'Default' by default |
diff --git a/contrib/terraform/openstack/kubespray.tf b/contrib/terraform/openstack/kubespray.tf
index 583a213d8fe610bad82c4b6b675d4848e53db7ad..ea943225e02c8b1ca4c9e1e42cb9e24e97c8d84a 100644
--- a/contrib/terraform/openstack/kubespray.tf
+++ b/contrib/terraform/openstack/kubespray.tf
@@ -81,7 +81,6 @@ module "compute" {
   supplementary_node_groups                    = var.supplementary_node_groups
   master_allowed_ports                         = var.master_allowed_ports
   worker_allowed_ports                         = var.worker_allowed_ports
-  wait_for_floatingip                          = var.wait_for_floatingip
   use_access_ip                                = var.use_access_ip
   master_server_group_policy                   = var.master_server_group_policy
   node_server_group_policy                     = var.node_server_group_policy
diff --git a/contrib/terraform/openstack/modules/compute/main.tf b/contrib/terraform/openstack/modules/compute/main.tf
index 03a3edaebe4698cc165d4a80548e79154cd5f109..9f6fbc75b4366638b537d09a2659ed42c7bf4d88 100644
--- a/contrib/terraform/openstack/modules/compute/main.tf
+++ b/contrib/terraform/openstack/modules/compute/main.tf
@@ -16,7 +16,11 @@ data "openstack_images_image_v2" "image_master" {
 }
 
 data "template_file" "cloudinit" {
-    template = file("${path.module}/templates/cloudinit.yaml")
+  template = file("${path.module}/templates/cloudinit.yaml")
+}
+
+data "openstack_networking_network_v2" "k8s_network" {
+  name = var.network_name
 }
 
 resource "openstack_compute_keypair_v2" "k8s" {
@@ -182,6 +186,16 @@ locals {
   image_to_use_master = var.image_master_uuid != "" ? var.image_master_uuid : var.image_uuid != "" ? var.image_uuid : data.openstack_images_image_v2.image_master[0].id
 }
 
+resource "openstack_networking_port_v2" "bastion_port" {
+  count                 = var.number_of_bastions
+  name                  = "${var.cluster_name}-bastion-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.bastion_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "bastion" {
   name       = "${var.cluster_name}-bastion-${count.index + 1}"
   count      = var.number_of_bastions
@@ -203,11 +217,9 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.bastion_sec_groups : null
-
   metadata = {
     ssh_user         = var.ssh_user
     kubespray_groups = "bastion"
@@ -220,6 +232,16 @@ resource "openstack_compute_instance_v2" "bastion" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_port" {
+  count                 = var.number_of_k8s_masters
+  name                  = "${var.cluster_name}-k8s-master-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_master" {
   name              = "${var.cluster_name}-k8s-master-${count.index + 1}"
   count             = var.number_of_k8s_masters
@@ -244,11 +266,9 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.master_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -268,6 +288,16 @@ resource "openstack_compute_instance_v2" "k8s_master" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_no_etcd_port" {
+  count                 = var.number_of_k8s_masters_no_etcd
+  name                  = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   name              = "${var.cluster_name}-k8s-master-ne-${count.index + 1}"
   count             = var.number_of_k8s_masters_no_etcd
@@ -292,11 +322,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.master_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -316,6 +344,16 @@ resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
   }
 }
 
+resource "openstack_networking_port_v2" "etcd_port" {
+  count                 = var.number_of_etcd
+  name                  = "${var.cluster_name}-etcd-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.etcd_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "etcd" {
   name              = "${var.cluster_name}-etcd-${count.index + 1}"
   count             = var.number_of_etcd
@@ -338,11 +376,9 @@ resource "openstack_compute_instance_v2" "etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.etcd_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.etcd_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.etcd_server_group_policy ? [openstack_compute_servergroup_v2.k8s_etcd[0]] : []
     content {
@@ -358,6 +394,16 @@ resource "openstack_compute_instance_v2" "etcd" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_port" {
+  count                 = var.number_of_k8s_masters_no_floating_ip
+  name                  = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   name              = "${var.cluster_name}-k8s-master-nf-${count.index + 1}"
   count             = var.number_of_k8s_masters_no_floating_ip
@@ -380,11 +426,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.master_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -400,6 +444,16 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_master_no_floating_ip_no_etcd_port" {
+  count                 = var.number_of_k8s_masters_no_floating_ip_no_etcd
+  name                  = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.master_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   name              = "${var.cluster_name}-k8s-master-ne-nf-${count.index + 1}"
   count             = var.number_of_k8s_masters_no_floating_ip_no_etcd
@@ -423,11 +477,9 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_master_no_floating_ip_no_etcd_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.master_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.master_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_master[0]] : []
     content {
@@ -443,6 +495,16 @@ resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_node_port" {
+  count                 = var.number_of_k8s_nodes
+  name                  = "${var.cluster_name}-k8s-node-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_node" {
   name              = "${var.cluster_name}-k8s-node-${count.index + 1}"
   count             = var.number_of_k8s_nodes
@@ -466,10 +528,8 @@ resource "openstack_compute_instance_v2" "k8s_node" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.worker_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
@@ -490,6 +551,16 @@ resource "openstack_compute_instance_v2" "k8s_node" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_node_no_floating_ip_port" {
+  count                 = var.number_of_k8s_nodes_no_floating_ip
+  name                  = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   name              = "${var.cluster_name}-k8s-node-nf-${count.index + 1}"
   count             = var.number_of_k8s_nodes_no_floating_ip
@@ -513,11 +584,9 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.k8s_node_no_floating_ip_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.worker_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
@@ -533,6 +602,16 @@ resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
   }
 }
 
+resource "openstack_networking_port_v2" "k8s_nodes_port" {
+  for_each              = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
+  name                  = "${var.cluster_name}-k8s-node-${each.key}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids    = var.port_security_enabled ? local.worker_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "k8s_nodes" {
   for_each          = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? var.k8s_nodes : {}
   name              = "${var.cluster_name}-k8s-node-${each.key}"
@@ -556,11 +635,9 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
   }
 
   network {
-    name = var.network_name
+    port = openstack_networking_port_v2.k8s_nodes_port[each.key].id
   }
 
-  security_groups = var.port_security_enabled ? local.worker_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
@@ -580,6 +657,16 @@ resource "openstack_compute_instance_v2" "k8s_nodes" {
   }
 }
 
+resource "openstack_networking_port_v2" "glusterfs_node_no_floating_ip_port" {
+  count                 = var.number_of_gfs_nodes_no_floating_ip
+  name                  = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
+  network_id            = "${data.openstack_networking_network_v2.k8s_network.id}"
+  admin_state_up        = "true"
+  port_security_enabled = var.port_security_enabled
+  security_group_ids   = var.port_security_enabled ? local.gfs_sec_groups : null
+  no_security_groups    = var.port_security_enabled ? null : false
+}
+
 resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   name              = "${var.cluster_name}-gfs-node-nf-${count.index + 1}"
   count             = var.number_of_gfs_nodes_no_floating_ip
@@ -601,11 +688,9 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   }
 
   network {
-    name = var.network_name
+    port = element(openstack_networking_port_v2.glusterfs_node_no_floating_ip_port.*.id, count.index)
   }
 
-  security_groups = var.port_security_enabled ? local.gfs_sec_groups : null
-
   dynamic "scheduler_hints" {
     for_each = var.node_server_group_policy != "" ? [openstack_compute_servergroup_v2.k8s_node[0]] : []
     content {
@@ -621,39 +706,35 @@ resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
   }
 }
 
-resource "openstack_compute_floatingip_associate_v2" "bastion" {
+resource "openstack_networking_floatingip_associate_v2" "bastion" {
   count                 = var.number_of_bastions
   floating_ip           = var.bastion_fips[count.index]
-  instance_id           = element(openstack_compute_instance_v2.bastion.*.id, count.index)
-  wait_until_associated = var.wait_for_floatingip
+  port_id               = element(openstack_networking_port_v2.bastion_port.*.id, count.index)
 }
 
 
-resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
+resource "openstack_networking_floatingip_associate_v2" "k8s_master" {
   count                 = var.number_of_k8s_masters
-  instance_id           = element(openstack_compute_instance_v2.k8s_master.*.id, count.index)
   floating_ip           = var.k8s_master_fips[count.index]
-  wait_until_associated = var.wait_for_floatingip
+  port_id               = element(openstack_networking_port_v2.k8s_master_port.*.id, count.index)
 }
 
-resource "openstack_compute_floatingip_associate_v2" "k8s_master_no_etcd" {
-  count       = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
-  instance_id = element(openstack_compute_instance_v2.k8s_master_no_etcd.*.id, count.index)
-  floating_ip = var.k8s_master_no_etcd_fips[count.index]
+resource "openstack_networking_floatingip_associate_v2" "k8s_master_no_etcd" {
+  count                 = var.master_root_volume_size_in_gb == 0 ? var.number_of_k8s_masters_no_etcd : 0
+  floating_ip           = var.k8s_master_no_etcd_fips[count.index]
+  port_id               = element(openstack_networking_port_v2.k8s_master_no_etcd_port.*.id, count.index)
 }
 
-resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
+resource "openstack_networking_floatingip_associate_v2" "k8s_node" {
   count                 = var.node_root_volume_size_in_gb == 0 ? var.number_of_k8s_nodes : 0
   floating_ip           = var.k8s_node_fips[count.index]
-  instance_id           = element(openstack_compute_instance_v2.k8s_node[*].id, count.index)
-  wait_until_associated = var.wait_for_floatingip
+  port_id               = element(openstack_networking_port_v2.k8s_node_port.*.id, count.index)
 }
 
-resource "openstack_compute_floatingip_associate_v2" "k8s_nodes" {
+resource "openstack_networking_floatingip_associate_v2" "k8s_nodes" {
   for_each              = var.number_of_k8s_nodes == 0 && var.number_of_k8s_nodes_no_floating_ip == 0 ? { for key, value in var.k8s_nodes : key => value if value.floating_ip } : {}
   floating_ip           = var.k8s_nodes_fips[each.key].address
-  instance_id           = openstack_compute_instance_v2.k8s_nodes[each.key].id
-  wait_until_associated = var.wait_for_floatingip
+  port_id               = openstack_networking_port_v2.k8s_nodes_port[each.key].id
 }
 
 resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
diff --git a/contrib/terraform/openstack/modules/compute/variables.tf b/contrib/terraform/openstack/modules/compute/variables.tf
index 61a614e86caf0e5b6ca217c478df44f772db5a51..527e6dceb619c78ecbe7f37db72e5c9575cfecd3 100644
--- a/contrib/terraform/openstack/modules/compute/variables.tf
+++ b/contrib/terraform/openstack/modules/compute/variables.tf
@@ -106,8 +106,6 @@ variable "k8s_allowed_egress_ips" {
 
 variable "k8s_nodes" {}
 
-variable "wait_for_floatingip" {}
-
 variable "supplementary_master_groups" {
   default = ""
 }