diff --git a/.gitlab-ci/terraform.yml b/.gitlab-ci/terraform.yml
index 22b20812c123a440d9f0ee4fc3dca74d263a39b6..92e32409417f49f462171952af5e4283b14af8ef 100644
--- a/.gitlab-ci/terraform.yml
+++ b/.gitlab-ci/terraform.yml
@@ -9,10 +9,9 @@
     # Set Ansible config
     - cp ansible.cfg ~/.ansible.cfg
     # Prepare inventory
-    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
+    - cp contrib/terraform/$PROVIDER/sample-inventory/cluster.tfvars .
    - ln -s contrib/terraform/$PROVIDER/hosts
    - terraform init contrib/terraform/$PROVIDER
-    - cp contrib/terraform/$PROVIDER/sample-inventory/$VARIABLEFILE .
     # Copy SSH keypair
     - mkdir -p ~/.ssh
     - echo "$PACKET_PRIVATE_KEY" | base64 -d > ~/.ssh/id_rsa
@@ -24,8 +23,7 @@
   stage: unit-tests
   only: ['master', /^pr-.*$/]
   script:
-    - if [ "$PROVIDER" == "openstack" ]; then VARIABLEFILE="cluster.tfvars"; else VARIABLEFILE="cluster.tf"; fi
-    - terraform validate -var-file=$VARIABLEFILE contrib/terraform/$PROVIDER
+    - terraform validate -var-file=cluster.tfvars contrib/terraform/$PROVIDER
     - terraform fmt -check -diff contrib/terraform/$PROVIDER
 
 .terraform_apply:
@@ -48,7 +46,7 @@
 tf-validate-openstack:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.12
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
 
@@ -62,14 +60,14 @@ tf-validate-packet:
 tf-validate-aws:
   extends: .terraform_validate
   variables:
-    TF_VERSION: 0.11.11
+    TF_VERSION: 0.12.12
     PROVIDER: aws
     CLUSTER: $CI_COMMIT_REF_NAME
 
 # tf-packet-ubuntu16-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.11.11
+#     TF_VERSION: 0.12.12
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"
@@ -83,7 +81,7 @@ tf-validate-aws:
 # tf-packet-ubuntu18-default:
 #   extends: .terraform_apply
 #   variables:
-#     TF_VERSION: 0.11.11
+#     TF_VERSION: 0.12.12
 #     PROVIDER: packet
 #     CLUSTER: $CI_COMMIT_REF_NAME
 #     TF_VAR_number_of_k8s_masters: "1"
@@ -110,7 +108,7 @@ tf-ovh_ubuntu18-calico:
   when: on_success
   variables:
     <<: *ovh_variables
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.12
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"
@@ -138,7 +136,7 @@ tf-ovh_coreos-calico:
   when: on_success
   variables:
     <<: *ovh_variables
-    TF_VERSION: 0.12.6
+    TF_VERSION: 0.12.12
     PROVIDER: openstack
     CLUSTER: $CI_COMMIT_REF_NAME
     ANSIBLE_TIMEOUT: "60"
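
The CI changes above pin every job to Terraform 0.12.12, but that only fixes the binary the pipeline downloads; the HCL itself carries no version constraint. A minimal sketch of expressing the same floor in configuration — illustrative only, no file in this patch adds such a block — so that a stale local binary fails at `terraform init` instead of partway through a plan:

```hcl
# Hypothetical root-module snippet; not part of this patch.
terraform {
  # Same constraint the CI enforces via TF_VERSION: 0.12.12.
  required_version = "~> 0.12.12"
}
```
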
diff --git a/contrib/terraform/aws/create-infrastructure.tf b/contrib/terraform/aws/create-infrastructure.tf
index ebfd997018b16817cf59c7d6c05f62f7ad0a0509..60befe8e2239e858f9e5075c28bdd2bf888f3f8a 100644
--- a/contrib/terraform/aws/create-infrastructure.tf
+++ b/contrib/terraform/aws/create-infrastructure.tf
@@ -16,22 +16,22 @@ data "aws_availability_zones" "available" {}
 */
 
 module "aws-vpc" {
-  source = "modules/vpc"
+  source = "./modules/vpc"
 
   aws_cluster_name         = "${var.aws_cluster_name}"
   aws_vpc_cidr_block       = "${var.aws_vpc_cidr_block}"
-  aws_avail_zones          = "${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones          = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
   aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
   aws_cidr_subnets_public  = "${var.aws_cidr_subnets_public}"
   default_tags             = "${var.default_tags}"
 }
 
 module "aws-elb" {
-  source = "modules/elb"
+  source = "./modules/elb"
 
   aws_cluster_name      = "${var.aws_cluster_name}"
   aws_vpc_id            = "${module.aws-vpc.aws_vpc_id}"
-  aws_avail_zones       = "${slice(data.aws_availability_zones.available.names,0,2)}"
+  aws_avail_zones       = "${slice(data.aws_availability_zones.available.names, 0, 2)}"
   aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
   aws_elb_api_port      = "${var.aws_elb_api_port}"
   k8s_secure_api_port   = "${var.k8s_secure_api_port}"
@@ -39,7 +39,7 @@ module "aws-elb" {
 }
 
 module "aws-iam" {
-  source = "modules/iam"
+  source = "./modules/iam"
 
   aws_cluster_name = "${var.aws_cluster_name}"
 }
 
 /*
@@ -54,18 +54,18 @@ resource "aws_instance" "bastion-server" {
   instance_type               = "${var.aws_bastion_size}"
   count                       = "${length(var.aws_cidr_subnets_public)}"
   associate_public_ip_address = true
-  availability_zone           = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
-  subnet_id                   = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
+  availability_zone           = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+  subnet_id                   = "${element(module.aws-vpc.aws_subnet_ids_public, count.index)}"
 
-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
 
   key_name = "${var.AWS_SSH_KEY_NAME}"
 
   tags = "${merge(var.default_tags, map(
-        "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
-        "Cluster", "${var.aws_cluster_name}",
-        "Role", "bastion-${var.aws_cluster_name}-${count.index}"
-    ))}"
+    "Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
+    "Cluster", "${var.aws_cluster_name}",
+    "Role", "bastion-${var.aws_cluster_name}-${count.index}"
+  ))}"
 }
 
 /*
@@ -79,25 +79,25 @@ resource "aws_instance" "k8s-master" {
 
   count = "${var.aws_kube_master_num}"
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
 
-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
 
   iam_instance_profile = "${module.aws-iam.kube-master-profile}"
   key_name             = "${var.AWS_SSH_KEY_NAME}"
 
   tags = "${merge(var.default_tags, map(
-        "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
-        "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
-        "Role", "master"
-    ))}"
+    "Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
+    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
+    "Role", "master"
+  ))}"
 }
 
 resource "aws_elb_attachment" "attach_master_nodes" {
   count    = "${var.aws_kube_master_num}"
   elb      = "${module.aws-elb.aws_elb_api_id}"
-  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
+  instance = "${element(aws_instance.k8s-master.*.id, count.index)}"
 }
 
 resource "aws_instance" "k8s-etcd" {
@@ -106,18 +106,18 @@ resource "aws_instance" "k8s-etcd" {
 
   count = "${var.aws_etcd_num}"
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
 
-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
 
   key_name = "${var.AWS_SSH_KEY_NAME}"
 
   tags = "${merge(var.default_tags, map(
-        "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
-        "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
-        "Role", "etcd"
-    ))}"
+    "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
+    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
+    "Role", "etcd"
+  ))}"
 }
 
 resource "aws_instance" "k8s-worker" {
@@ -126,19 +126,19 @@ resource "aws_instance" "k8s-worker" {
 
   count = "${var.aws_kube_worker_num}"
 
-  availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
-  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
+  availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}"
+  subnet_id         = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}"
 
-  vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"]
+  vpc_security_group_ids = "${module.aws-vpc.aws_security_group}"
 
   iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
   key_name             = "${var.AWS_SSH_KEY_NAME}"
 
   tags = "${merge(var.default_tags, map(
-        "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
-        "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
-        "Role", "worker"
-    ))}"
+    "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
+    "kubernetes.io/cluster/${var.aws_cluster_name}", "member",
+    "Role", "worker"
+  ))}"
 }
 
 /*
@@ -148,14 +148,14 @@ resource "aws_instance" "k8s-worker" {
 data "template_file" "inventory" {
   template = "${file("${path.module}/templates/inventory.tpl")}"
 
-  vars {
-    public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
-    connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
+  vars = {
+    public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}"
+    connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
     connection_strings_node   = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
-    connection_strings_etcd   = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
-    list_master               = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
-    list_node                 = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
-    list_etcd                 = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
+    connection_strings_etcd   = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
+    list_master               = "${join("\n", aws_instance.k8s-master.*.tags.Name)}"
+    list_node                 = "${join("\n", aws_instance.k8s-worker.*.tags.Name)}"
+    list_etcd                 = "${join("\n", aws_instance.k8s-etcd.*.tags.Name)}"
     elb_api_fqdn              = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
   }
 }
@@ -165,7 +165,7 @@ resource "null_resource" "inventories" {
     command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}"
   }
 
-  triggers {
+  triggers = {
     template = "${data.template_file.inventory.rendered}"
   }
 }
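
The create-infrastructure.tf hunks above are the mechanical 0.11-to-0.12 conversion: module sources gain an explicit `./` prefix, the `vars`/`triggers` blocks become map assignments, and `vpc_security_group_ids` loses its wrapping brackets because `module.aws-vpc.aws_security_group` is already a list — under 0.12's stricter type system, `["${...}"]` would produce a list of lists. A condensed sketch of the three patterns (hypothetical resource names, arguments trimmed; 0.12 also permits dropping the `"${...}"` wrappers, which this patch keeps to minimize churn):

```hcl
module "aws-vpc" {
  # Bare "modules/vpc" is parsed as a registry address in 0.12 and
  # rejected at `terraform init`; local paths need the "./" prefix.
  source = "./modules/vpc"
}

resource "aws_instance" "example" {
  # The module output is already list-typed, so no surrounding [ ... ].
  vpc_security_group_ids = module.aws-vpc.aws_security_group
}

resource "null_resource" "inventories" {
  # Map arguments take "="; the 0.11 block form `triggers { ... }`
  # is a syntax error in 0.12.
  triggers = {
    template = data.template_file.inventory.rendered
  }
}
```
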
"kubernetes.io/cluster/${var.aws_cluster_name}", "member", - "Role", "etcd" - ))}" + "Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}", + "kubernetes.io/cluster/${var.aws_cluster_name}", "member", + "Role", "etcd" + ))}" } resource "aws_instance" "k8s-worker" { @@ -126,19 +126,19 @@ resource "aws_instance" "k8s-worker" { count = "${var.aws_kube_worker_num}" - availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}" - subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}" + availability_zone = "${element(slice(data.aws_availability_zones.available.names, 0, 2), count.index)}" + subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private, count.index)}" - vpc_security_group_ids = ["${module.aws-vpc.aws_security_group}"] + vpc_security_group_ids = "${module.aws-vpc.aws_security_group}" iam_instance_profile = "${module.aws-iam.kube-worker-profile}" key_name = "${var.AWS_SSH_KEY_NAME}" tags = "${merge(var.default_tags, map( - "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}", - "kubernetes.io/cluster/${var.aws_cluster_name}", "member", - "Role", "worker" - ))}" + "Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}", + "kubernetes.io/cluster/${var.aws_cluster_name}", "member", + "Role", "worker" + ))}" } /* @@ -148,14 +148,14 @@ resource "aws_instance" "k8s-worker" { data "template_file" "inventory" { template = "${file("${path.module}/templates/inventory.tpl")}" - vars { - public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}" - connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" + vars = { + public_ip_address_bastion = "${join("\n", formatlist("bastion ansible_host=%s", aws_instance.bastion-server.*.public_ip))}" + connection_strings_master = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}" connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}" - connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}" - list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}" - list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}" - list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}" + connection_strings_etcd = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}" + list_master = "${join("\n", aws_instance.k8s-master.*.tags.Name)}" + list_node = "${join("\n", aws_instance.k8s-worker.*.tags.Name)}" + list_etcd = "${join("\n", aws_instance.k8s-etcd.*.tags.Name)}" elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\"" } } @@ -165,7 +165,7 @@ resource "null_resource" "inventories" { command = "echo '${data.template_file.inventory.rendered}' > ${var.inventory_file}" } - triggers { + triggers = { template = "${data.template_file.inventory.rendered}" } } diff --git a/contrib/terraform/aws/modules/elb/main.tf b/contrib/terraform/aws/modules/elb/main.tf index 48b8e3df7aca1a3915c90bd6a76cd5db79eb068c..2139876326d9a9c68e685180abf598613d3f0252 100644 --- a/contrib/terraform/aws/modules/elb/main.tf +++ b/contrib/terraform/aws/modules/elb/main.tf @@ -28,7 +28,7 @@ 
resource "aws_security_group_rule" "aws-allow-api-egress" { # Create a new AWS ELB for K8S API resource "aws_elb" "aws-elb-api" { name = "kubernetes-elb-${var.aws_cluster_name}" - subnets = ["${var.aws_subnet_ids_public}"] + subnets = "${var.aws_subnet_ids_public}" security_groups = ["${aws_security_group.aws-elb.id}"] listener { diff --git a/contrib/terraform/aws/sample-inventory/cluster.tf b/contrib/terraform/aws/sample-inventory/cluster.tfvars similarity index 100% rename from contrib/terraform/aws/sample-inventory/cluster.tf rename to contrib/terraform/aws/sample-inventory/cluster.tfvars diff --git a/contrib/terraform/aws/terraform.tfvars b/contrib/terraform/aws/terraform.tfvars index c5b1dbff1b1daf7697408b2c70331cc9511cbcdd..c8db6b424a23adada6585c53ab339ed852f9fe31 100644 --- a/contrib/terraform/aws/terraform.tfvars +++ b/contrib/terraform/aws/terraform.tfvars @@ -2,9 +2,9 @@ aws_cluster_name = "devtest" #VPC Vars -aws_vpc_cidr_block = "10.250.192.0/18" -aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"] -aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"] +aws_vpc_cidr_block = "10.250.192.0/18" +aws_cidr_subnets_private = ["10.250.192.0/20", "10.250.208.0/20"] +aws_cidr_subnets_public = ["10.250.224.0/20", "10.250.240.0/20"] #Bastion Host aws_bastion_size = "t2.medium" @@ -12,24 +12,24 @@ aws_bastion_size = "t2.medium" #Kubernetes Cluster -aws_kube_master_num = 3 +aws_kube_master_num = 3 aws_kube_master_size = "t2.medium" -aws_etcd_num = 3 +aws_etcd_num = 3 aws_etcd_size = "t2.medium" -aws_kube_worker_num = 4 +aws_kube_worker_num = 4 aws_kube_worker_size = "t2.medium" #Settings AWS ELB -aws_elb_api_port = 6443 -k8s_secure_api_port = 6443 +aws_elb_api_port = 6443 +k8s_secure_api_port = 6443 kube_insecure_apiserver_address = "0.0.0.0" default_tags = { -# Env = "devtest" -# Product = "kubernetes" + # Env = "devtest" + # Product = "kubernetes" } inventory_file = "../../../inventory/hosts" diff --git a/contrib/terraform/packet/README.md b/contrib/terraform/packet/README.md index 4cc2448150cf4ab68a759fd4c8e20c2df578d0f6..b216797e9fd3a5f46e790bc7f63082cf9a0d22c8 100644 --- a/contrib/terraform/packet/README.md +++ b/contrib/terraform/packet/README.md @@ -38,7 +38,7 @@ now six total etcd replicas. ## SSH Key Setup -An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tf will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tf to blank to prevent the duplicate key from being uploaded which will cause an error. +An SSH keypair is required so Ansible can access the newly provisioned nodes (bare metal Packet hosts). By default, the public SSH key defined in cluster.tfvars will be installed in authorized_key on the newly provisioned nodes (~/.ssh/id_rsa.pub). Terraform will upload this public key and then it will be distributed out to all the nodes. If you have already set this public key in Packet (i.e. via the portal), then set the public keyfile name in cluster.tfvars to blank to prevent the duplicate key from being uploaded which will cause an error. 
diff --git a/docs/packet.md b/docs/packet.md
index 5e8b010f4f087dd5eaed449d6eb9f7273a69a833..eef35591d90f351f26b05149c13ed3d729b1d6d8 100644
--- a/docs/packet.md
+++ b/docs/packet.md
@@ -40,7 +40,7 @@ Grab the latest version of Terraform and install it.
 ```bash
 echo "https://releases.hashicorp.com/terraform/$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')/terraform_$(curl -s https://checkpoint-api.hashicorp.com/v1/check/terraform | jq -r -M '.current_version')_darwin_amd64.zip"
 sudo yum install unzip
-sudo unzip terraform_0.11.11_linux_amd64.zip -d /usr/local/bin/
+sudo unzip terraform_0.12.12_linux_amd64.zip -d /usr/local/bin/
 ```
 
 ## Download Kubespray
@@ -67,7 +67,7 @@ Details about the cluster, such as the name, as well as the authentication
 token for Packet need to be defined. To find these values see [Packet API Integration](https://support.packet.com/kb/articles/api-integrations)
 
 ```bash
-vi cluster.tf
+vi cluster.tfvars
 ```
 
 * cluster_name = alpha
 * packet_project_id = ABCDEFGHIJKLMNOPQRSTUVWXYZ123456
@@ -84,7 +84,7 @@ terraform init ../../contrib/terraform/packet/
 Run Terraform to deploy the hardware.
 
 ```bash
-terraform apply -var-file=cluster.tf ../../contrib/terraform/packet
+terraform apply -var-file=cluster.tfvars ../../contrib/terraform/packet
 ```
 
 ## Run Kubespray Playbooks