2025-03-18 18:54:10 -04:00

234 lines
6.2 KiB
HCL

variable "project" {
  description = "GCP project ID to create all resources in."
  type        = string
}
variable "region" {
  description = "GCP region hosting the cluster, node pool, and regional control plane."
  type        = string
}
variable "zones" {
  description = "Set of zones (within var.region) used as node locations for the cluster and node pool."
  type        = set(string)
}
variable "name" {
  description = "Name of the GKE cluster; also used as the prefix for the node pool name (\"<name>-pool\")."
  type        = string
}
# Left untyped deliberately: callers may pass either plain strings or
# resource attributes; adding `type` here could break existing callers.
variable "network_id" {
  description = "ID of the VPC network the cluster is attached to."
}
variable "subnetwork_id" {
  description = "ID of the subnetwork the cluster is attached to."
}
variable "service_range_name" {
  description = "Name of the subnetwork secondary IP range used for Kubernetes Services (VPC-native clusters)."
}
variable "pod_range_name" {
  description = "Name of the subnetwork secondary IP range used for Pods (VPC-native clusters)."
}
variable "service_account_email" {
  description = "Email of the service account that cluster and node pool nodes run as."
  type        = string
}
variable "dns_managed_zone" {
  # NOTE(review): not referenced in this portion of the module -- confirm
  # where it is consumed before documenting further.
  description = "Cloud DNS managed zone passed through to the module."
}
variable "service_container" {
  # Only used as an explicit depends_on of the cluster resource, so cluster
  # creation waits until this resource exists.
  description = "Resource the cluster depends on before creation (presumably a google_project_service enabling the Container API -- verify against caller)."
}
variable "public_ingress" {
description = "Set to true to make the kubernetes ingresses exposed to the public internet."
type = bool
}
variable "ingress_type" {
description = "What controller should we use to handle incoming http(s) connections."
type = string
default = "gateway"
# NOTE(review): the cluster's addons_config below special-cases
# var.ingress_type == "nginx", but this validation rejects "nginx". Relax the
# condition here if/when an nginx controller is actually supported.
validation {
condition = contains(["gateway"], var.ingress_type)
error_message = "Currently only \"gateway\" is supported."
}
}
variable "main_k8s_namespace" {
  description = "Kubernetes namespace the module's workloads are deployed into."
  type        = string
  default     = "default"
}
variable "ssh_key" {
  description = "SSH key to install on user machine and GKE nodes. Format: username:public key. Null installs no key."
  type        = string
  # The node pool already handles a null key (it only adds the metadata entry
  # when a key is provided), so make the variable optional to match.
  default     = null
}
variable "cluster_exists" {
  description = "Set to true after the kubernetes clusters exist to install the kubernetes_manifest resources. See https://github.com/hashicorp/terraform-provider-kubernetes/issues/1775"
  type        = bool
}
variable "routes_based" {
description = "Set to true to create a routes-based cluster instead of VPC Native. This is mostly for testing."
type = bool
default = false
}
variable "enable_snat" {
# NOTE(review): not referenced in this portion of the module; presumably
# consumed by SNAT/ip-masq configuration elsewhere in the file -- confirm.
description = "Whether we should enable source network address translation to the node IP address."
type = bool
}
output "gke_connect_command" {
description = "Command to run to connect to the kubernetes cluster. By default this uses credentials from `gcloud auth login`, not application default."
value = "gcloud container clusters get-credentials --region ${var.region} --project ${var.project} ${google_container_cluster.cluster.name}"
}
# local.kubeconfig_yaml and local_file.kubeconfig are defined elsewhere in
# this file (outside the visible section).
output "gke_kubeconfig" {
description = "Yaml kubeconfig to configure kubectl to connect to the cluster. Traditionally gets written to `~/.kube/config`."
value = local.kubeconfig_yaml
}
output "kubectl_command" {
description = "Kubectl command to access the kubernetes cluster."
value = "kubectl --kubeconfig ${abspath(local_file.kubeconfig.filename)}"
}
output "cluster_ip_address_utilization_url" {
description = "URL to a page showing IP address utilization within the cluster."
value = "https://console.cloud.google.com/kubernetes/clusters/details/${var.region}/${var.name}/observability?pageState=(%22nav%22:(%22section%22:%22ipUtilization%22))&project=${var.project}"
}
locals {
# Fixed node pool size; used as initial_node_count and as both the
# autoscaler min and max in the node pool below.
node_count = 2
# Small machine type to keep demo costs down (see the spot/"Cost savings"
# notes on the node configs).
cluster_node_type = "g1-small"
}
# Regional private GKE cluster. VPC-native by default; routes-based only when
# var.routes_based is set (testing).
resource "google_container_cluster" "cluster" {
project = var.project
name = var.name
# Wait for the service (passed in by the caller) to exist before creating
# the cluster.
depends_on = [
var.service_container
]
network = var.network_id
subnetwork = var.subnetwork_id
networking_mode = var.routes_based ? "ROUTES" : "VPC_NATIVE"
# Pods and Services draw from pre-created secondary ranges on the subnetwork.
ip_allocation_policy {
stack_type = "IPV4"
services_secondary_range_name = var.service_range_name
cluster_secondary_range_name = var.pod_range_name
}
lifecycle {
ignore_changes = [
# ip_allocation_policy always triggers recreating the cluster on a routes-based cluster. Might be a bug in the google cloud terraform provider? This is not needed for the default VPC_NATIVE cluster.
ip_allocation_policy,
]
}
location = var.region
# We can't create a cluster with no node pool defined, but we want to only use
# separately managed node pools. So we create the smallest possible default
# node pool and immediately delete it.
remove_default_node_pool = true
initial_node_count = 1
node_locations = var.zones
node_config {
# preemptible = true # Cost savings. Not needed in production, but we're going to be spinning up a lot of nodes in this demo.
spot = true # Cost savings. Not needed in production, but we're going to be spinning up a lot of nodes in this demo.
machine_type = local.cluster_node_type
disk_size_gb = 10
tags = ["allow-iap-ssh"] # Allow ssh onto nodes for iptables investigation. Not needed in production.
service_account = var.service_account_email
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform"
]
}
# Lets Kubernetes service accounts impersonate GCP service accounts.
workload_identity_config {
workload_pool = "${var.project}.svc.id.goog"
}
# Enables the GKE Gateway controller (see var.ingress_type).
gateway_api_config {
channel = "CHANNEL_STANDARD"
}
addons_config {
# NOTE(review): var.ingress_type's validation currently only accepts
# "gateway", so this is never disabled today; the check anticipates a
# future nginx option.
http_load_balancing {
disabled = var.ingress_type == "nginx"
}
}
private_cluster_config {
enable_private_endpoint = true # Disable public ip for control plane endpoint
enable_private_nodes = true # Disable public ip for nodes
}
master_authorized_networks_config {
# Disallow control-plane access from Google Cloud public IPs (e.g. GCE VMs
# with external addresses). NOTE(review): with enable_private_endpoint =
# true above the public endpoint is disabled anyway -- confirm this block
# is still required.
gcp_public_cidrs_access_enabled = false
}
control_plane_endpoints_config {
# Keep the DNS-based control plane endpoint reachable even though the
# IP-based public endpoint is disabled above.
dns_endpoint_config {
allow_external_traffic = true
}
}
# Cluster operations are slow; extend the default timeouts.
timeouts {
create = "60m"
delete = "60m"
update = "60m"
}
# Demo cluster: allow `terraform destroy` without manual intervention.
deletion_protection = false
}
# Separately managed node pool for the cluster above, pinned to an exact size
# (the autoscaler's min and max are both local.node_count).
resource "google_container_node_pool" "node_pool" {
  project            = var.project
  name               = "${var.name}-pool"
  location           = var.region
  cluster            = google_container_cluster.cluster.name
  node_locations     = var.zones
  initial_node_count = local.node_count

  autoscaling {
    # Forcing exactly 2 nodes to have a different quantity of nodes than clusters to show which is consuming the IP addresses.
    total_min_node_count = local.node_count
    total_max_node_count = local.node_count
  }

  node_config {
    # preemptible = true # Cost savings
    spot            = true # Cost savings
    machine_type    = local.cluster_node_type
    disk_size_gb    = 10
    service_account = var.service_account_email
    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
    # Allow ssh onto nodes for iptables investigation. Not needed in production.
    tags = ["allow-iap-ssh"]
    # Legacy metadata endpoints are always disabled; the ssh key entry is
    # added only when a key was provided.
    metadata = merge(
      { "disable-legacy-endpoints" = "true" },
      var.ssh_key != null ? { ssh-keys = var.ssh_key } : {}
    )
    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }
}