Demonstrate conservative RFC1918 IP address use on GKE.
This is a Terraform config that spins up 14 GKE clusters in only a /26 (64 addresses), demonstrating that GKE clusters do not need to consume large amounts of RFC1918 IP address space.
This commit is contained in:
50
terraform/modules/cluster/kubeconfig.tf
Normal file
50
terraform/modules/cluster/kubeconfig.tf
Normal file
@@ -0,0 +1,50 @@
|
||||
locals {
  # Entry name shared by the cluster/context/user stanzas below. Matches the
  # naming scheme `gcloud container clusters get-credentials` uses:
  # gke_<project>_<location>_<cluster name>.
  kubeconfig_name = "gke_${google_container_cluster.cluster.project}_${google_container_cluster.cluster.location}_${google_container_cluster.cluster.name}"

  kubeconfig_yaml = yamlencode(local.kubeconfig)

  # Minimal kubeconfig document: points kubectl at the cluster's DNS-based
  # control plane endpoint and authenticates via gke-gcloud-auth-plugin.
  kubeconfig = {
    apiVersion  = "v1"
    kind        = "Config"
    preferences = {}

    clusters = [
      {
        name = local.kubeconfig_name
        cluster = {
          server = "https://${google_container_cluster.cluster.control_plane_endpoints_config[0].dns_endpoint_config[0].endpoint}"
        }
      }
    ]

    contexts = [
      {
        name = local.kubeconfig_name
        context = {
          cluster = local.kubeconfig_name
          user    = local.kubeconfig_name
        }
      }
    ]

    current-context = local.kubeconfig_name

    users = [
      {
        name = local.kubeconfig_name
        user = {
          exec = {
            apiVersion         = "client.authentication.k8s.io/v1beta1"
            command            = "gke-gcloud-auth-plugin"
            provideClusterInfo = true
            installHint        = <<EOT
Install gke-gcloud-auth-plugin for use with kubectl by following
https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl#install_plugin
EOT
          }
        }
      }
    ]
  }
}
|
||||
|
||||
# Write the generated kubeconfig into the repo-local output directory so it
# can be handed to kubectl via --kubeconfig (see the kubectl_command output).
resource "local_file" "kubeconfig" {
  content  = local.kubeconfig_yaml
  filename = "${path.module}/../../../output/kubeconfig/${var.name}.yaml"

  # A kubeconfig grants cluster access: keep it readable by the owner only.
  file_permission      = "0600"
  directory_permission = "0755"
}
|
||||
228
terraform/modules/cluster/main.tf
Normal file
228
terraform/modules/cluster/main.tf
Normal file
@@ -0,0 +1,228 @@
|
||||
variable "project" {
  description = "GCP project to create the cluster in."
  type        = string
}

variable "region" {
  description = "GCP region for this (regional) cluster."
  type        = string
}

variable "zones" {
  description = "Zones within var.region to place cluster nodes in."
  type        = set(string)
}

variable "name" {
  description = "Cluster name; also used for the node pool name and the kubeconfig output file."
  type        = string
}

variable "network_id" {
  description = "ID of the VPC network to attach the cluster to."
  type        = string
}

variable "subnetwork_id" {
  description = "ID of the subnetwork to attach the cluster to."
  type        = string
}

variable "service_range_name" {
  description = "Name of the subnetwork secondary range to allocate Kubernetes Service IPs from."
  type        = string
}

variable "pod_range_name" {
  description = "Name of the subnetwork secondary range to allocate Pod IPs from."
  type        = string
}

variable "service_account_email" {
  description = "Email of the service account the GKE nodes run as."
  type        = string
}

variable "dns_managed_zone" {
  # Cloud DNS managed zone resource object; intentionally left untyped so the
  # whole resource can be passed through to the workload module.
  description = "google_dns_managed_zone that workload DNS records are published into."
}

variable "service_container" {
  # Only used for ordering (see depends_on in google_container_cluster.cluster).
  description = "container.googleapis.com project service resource; ensures the API is enabled before cluster creation."
}

variable "public_ingress" {
  description = "Set to true to make the kubernetes ingresses exposed to the public internet."
  type        = bool
}

variable "ingress_type" {
  description = "What controller should we use to handle incoming http(s) connections."
  type        = string
  default     = "gateway"

  validation {
    condition     = contains(["gateway"], var.ingress_type)
    error_message = "Currently only \"gateway\" is supported."
  }
}

variable "main_k8s_namespace" {
  description = "Kubernetes namespace the demo workload is deployed into."
  type        = string
  default     = "default"
}

variable "ssh_key" {
  description = "SSH key to install on user machine and GKE nodes. Format: username:public key"
  type        = string
}

variable "cluster_exists" {
  description = "Set to true after the kubernetes clusters exist to install the kubernetes_manifest resources. See https://github.com/hashicorp/terraform-provider-kubernetes/issues/1775"
  type        = bool
}

variable "routes_based" {
  description = "Set to true to create a routes-based cluster instead of VPC Native. This is mostly for testing."
  type        = bool
  default     = true
}
|
||||
|
||||
output "gke_connect_command" {
  description = "Command to run to connect to the kubernetes cluster. By default this uses credentials from `gcloud auth login`, not application default."
  value       = "gcloud container clusters get-credentials --region ${var.region} --project ${var.project} ${google_container_cluster.cluster.name}"
}

output "gke_kubeconfig" {
  description = "Yaml kubeconfig to configure kubectl to connect to the cluster. Traditionally gets written to `~/.kube/config`."
  value       = local.kubeconfig_yaml
}

output "kubectl_command" {
  description = "Kubectl command to access the kubernetes cluster."
  value       = "kubectl --kubeconfig ${abspath(local_file.kubeconfig.filename)}"
}

output "cluster_ip_address_utilization_url" {
  description = "URL to a page showing IP address utilization within the cluster."
  value       = "https://console.cloud.google.com/kubernetes/clusters/details/${var.region}/${var.name}/observability?pageState=(%22nav%22:(%22section%22:%22ipUtilization%22))&project=${var.project}"
}
|
||||
|
||||
locals {
  # Keep nodes few and small: the demo is about IP frugality, not capacity.
  node_count        = 2
  cluster_node_type = "g1-small"
}
|
||||
|
||||
# The GKE cluster: private nodes, private control plane endpoint (reachable
# via the DNS-based endpoint), and secondary-range IP allocation when
# running in VPC_NATIVE mode.
resource "google_container_cluster" "cluster" {
  project = var.project
  name    = var.name

  # Make sure the container API is enabled before trying to create a cluster.
  depends_on = [
    var.service_container
  ]

  network    = var.network_id
  subnetwork = var.subnetwork_id

  networking_mode = var.routes_based ? "ROUTES" : "VPC_NATIVE"

  ip_allocation_policy {
    stack_type                    = "IPV4"
    services_secondary_range_name = var.service_range_name
    cluster_secondary_range_name  = var.pod_range_name
  }

  lifecycle {
    ignore_changes = [
      # ip_allocation_policy always triggers recreating the cluster on a
      # routes-based cluster. Might be a bug in the google cloud terraform
      # provider? Not needed for the default VPC_NATIVE cluster.
      ip_allocation_policy,
    ]
  }

  location = var.region

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  remove_default_node_pool = true
  initial_node_count       = 1
  node_locations           = var.zones

  node_config {
    spot         = true # Cost savings. Not needed in production, but we're going to be spinning up a lot of nodes in this demo.
    machine_type = local.cluster_node_type
    disk_size_gb = 10

    tags = ["allow-iap-ssh"] # Allow ssh onto nodes for iptables investigation. Not needed in production.

    service_account = var.service_account_email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform"
    ]
  }

  workload_identity_config {
    workload_pool = "${var.project}.svc.id.goog"
  }

  gateway_api_config {
    channel = "CHANNEL_STANDARD"
  }

  addons_config {
    http_load_balancing {
      # The built-in GCE ingress controller is only disabled when nginx
      # handles ingress instead.
      disabled = var.ingress_type == "nginx"
    }
  }

  private_cluster_config {
    enable_private_endpoint = true # Disable public ip for control plane endpoint
    enable_private_nodes    = true # Disable public ip for nodes
  }

  master_authorized_networks_config {
    # Do not allow reaching the control plane from Google Cloud public IP
    # ranges; access goes through the DNS-based endpoint configured below.
    gcp_public_cidrs_access_enabled = false
  }

  control_plane_endpoints_config {
    dns_endpoint_config {
      # Expose an IAM-authenticated DNS endpoint so kubectl can reach the
      # otherwise-private control plane without consuming an RFC1918 address.
      allow_external_traffic = true
    }
  }

  timeouts {
    create = "60m"
    delete = "60m"
    update = "60m"
  }

  # Demo clusters are created and torn down frequently.
  deletion_protection = false
}
|
||||
|
||||
# The separately managed node pool that actually hosts the workloads
# (the cluster's default pool is deleted at creation time).
resource "google_container_node_pool" "node_pool" {
  project            = var.project
  name               = "${var.name}-pool"
  location           = var.region
  cluster            = google_container_cluster.cluster.name
  initial_node_count = local.node_count

  node_locations = var.zones

  autoscaling {
    # Forcing exactly 2 nodes to have a different quantity of nodes than
    # clusters to show which is consuming the IP addresses.
    total_min_node_count = local.node_count
    total_max_node_count = local.node_count
  }

  node_config {
    spot         = true # Cost savings
    machine_type = local.cluster_node_type
    disk_size_gb = 10

    service_account = var.service_account_email
    oauth_scopes = [
      "https://www.googleapis.com/auth/cloud-platform"
    ]

    tags = ["allow-iap-ssh"] # Allow ssh onto nodes for iptables investigation. Not needed in production.

    # Always disable legacy metadata endpoints; add the ssh key when provided.
    metadata = var.ssh_key != null ? {
      ssh-keys                   = var.ssh_key
      "disable-legacy-endpoints" = "true"
    } : { "disable-legacy-endpoints" = "true" }

    workload_metadata_config {
      mode = "GKE_METADATA"
    }
  }
}
|
||||
39
terraform/modules/cluster/workload.tf
Normal file
39
terraform/modules/cluster/workload.tf
Normal file
@@ -0,0 +1,39 @@
|
||||
# Pass-through configuration for the external-dns deployment in the
# k8s_workload module.
variable "external_dns_k8s_namespace" {
  type = string
}

variable "external_dns_k8s_service_account" {
  type = string
}

variable "external_dns_gcp_service_account_email" {
  type = string
}
|
||||
|
||||
# Credentials of the identity terraform is running as; supplies the OAuth
# token used to talk to the cluster.
data "google_client_config" "default" {}

provider "kubernetes" {
  host  = "https://${google_container_cluster.cluster.control_plane_endpoints_config[0].dns_endpoint_config[0].endpoint}"
  token = data.google_client_config.default.access_token

  # GKE adds these annotations server-side; ignore them to avoid perpetual diffs.
  ignore_annotations = [
    "^autopilot\\.gke\\.io\\/.*",
    "^cloud\\.google\\.com\\/.*"
  ]
}
|
||||
|
||||
# The in-cluster workload (deployments, services, ingress, external-dns).
# Gated on var.cluster_exists because kubernetes_manifest resources cannot be
# planned before the cluster API is reachable.
module "workload" {
  count  = var.cluster_exists ? 1 : 0
  source = "../k8s_workload"

  project   = var.project
  region    = var.region
  cluster   = google_container_cluster.cluster
  node_pool = google_container_node_pool.node_pool

  external_dns_k8s_namespace             = var.external_dns_k8s_namespace
  external_dns_k8s_service_account       = var.external_dns_k8s_service_account
  external_dns_gcp_service_account_email = var.external_dns_gcp_service_account_email

  dns_managed_zone   = var.dns_managed_zone
  public_ingress     = var.public_ingress
  ingress_type       = var.ingress_type
  main_k8s_namespace = var.main_k8s_namespace
}
|
||||
154
terraform/modules/k8s_workload/external_dns.tf
Normal file
154
terraform/modules/k8s_workload/external_dns.tf
Normal file
@@ -0,0 +1,154 @@
|
||||
variable "external_dns_k8s_namespace" {
  type = string
}

variable "external_dns_k8s_service_account" {
  type = string
}

variable "external_dns_gcp_service_account_email" {
  type = string
}

locals {
  # Resolve to the managed namespace resource when we created one, otherwise
  # fall back to the caller-supplied name (the "default" namespace case).
  external_dns_namespace = length(kubernetes_namespace.external_dns) == 0 ? var.external_dns_k8s_namespace : kubernetes_namespace.external_dns[0].metadata[0].name

  # DNS zone names end with a trailing dot; strip it for the domain filter.
  external_dns_domain_filter = trimsuffix("${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")
}
|
||||
|
||||
# Namespace for external-dns. Only created when a dedicated (non-"default")
# namespace was requested.
resource "kubernetes_namespace" "external_dns" {
  count = var.external_dns_k8s_namespace != "default" ? 1 : 0

  metadata {
    name = var.external_dns_k8s_namespace
  }

  timeouts {
    delete = "60m"
  }

  depends_on = [var.node_pool]
}
|
||||
|
||||
|
||||
# Kubernetes service account for external-dns, bound via Workload Identity to
# the GCP service account that can write Cloud DNS records.
resource "kubernetes_service_account" "external_dns" {
  metadata {
    name      = var.external_dns_k8s_service_account
    namespace = local.external_dns_namespace

    annotations = {
      "iam.gke.io/gcp-service-account" = var.external_dns_gcp_service_account_email
    }
  }

  depends_on = [var.node_pool]
}
|
||||
|
||||
# Read-only RBAC role covering every source external-dns is configured to
# watch (core objects, Ingresses, and Gateway API routes).
resource "kubernetes_cluster_role" "external_dns" {
  metadata {
    name = "external-dns"

    labels = {
      "app.kubernetes.io/name" = "external-dns"
    }
  }

  # Core resources external-dns discovers hostnames/targets from.
  rule {
    verbs      = ["get", "watch", "list"]
    api_groups = [""]
    resources  = ["services", "endpoints", "pods", "nodes", "namespaces"]
  }

  # Classic Ingress resources.
  rule {
    verbs      = ["get", "watch", "list"]
    api_groups = ["extensions", "networking.k8s.io"]
    resources  = ["ingresses"]
  }

  # Gateway API resources.
  rule {
    verbs      = ["get", "watch", "list"]
    api_groups = ["gateway.networking.k8s.io"]
    resources  = ["gateways", "httproutes", "tlsroutes", "tcproutes", "udproutes"]
  }

  depends_on = [var.node_pool]
}
|
||||
|
||||
# Grant the external-dns service account the read-only role defined above.
resource "kubernetes_cluster_role_binding" "external_dns_viewer" {
  metadata {
    name = "external-dns-viewer"

    labels = {
      "app.kubernetes.io/name" = "external-dns"
    }
  }

  subject {
    kind      = "ServiceAccount"
    name      = kubernetes_service_account.external_dns.metadata[0].name
    namespace = kubernetes_service_account.external_dns.metadata[0].namespace
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "external-dns"
  }

  depends_on = [var.node_pool]
}
|
||||
|
||||
# external-dns watches Services, Ingresses and HTTPRoutes in the cluster and
# publishes matching records into the Cloud DNS managed zone.
resource "kubernetes_deployment" "external_dns" {
  metadata {
    name      = "external-dns"
    namespace = local.external_dns_namespace

    labels = {
      "app.kubernetes.io/name" = "external-dns"
    }
  }

  spec {
    selector {
      match_labels = {
        "app.kubernetes.io/name" = "external-dns"
      }
    }

    revision_history_limit = 0

    template {
      metadata {
        labels = {
          "app.kubernetes.io/name" = "external-dns"
        }
      }

      spec {
        container {
          name  = "external-dns"
          image = "registry.k8s.io/external-dns/external-dns:v0.15.1"
          args = [
            "--source=service",
            "--source=ingress",
            "--source=gateway-httproute",
            # "--source=gateway-tlsroute",
            # "--source=gateway-tcproute",
            # "--source=gateway-udproute",
            "--domain-filter=${local.external_dns_domain_filter}",
            "--provider=google",
            "--log-format=json",
            # TXT registry records mark ownership so multiple clusters can
            # share one zone without clobbering each other's records.
            "--registry=txt",
            "--txt-owner-id=k8s-${var.cluster.name}",
            # "--log-level=debug",
          ]
        }

        # Workload-Identity-annotated account granting Cloud DNS write access.
        service_account_name = kubernetes_service_account.external_dns.metadata[0].name
      }
    }

    strategy {
      type = "Recreate"
    }
  }

  depends_on = [var.node_pool, time_sleep.wait_service_cleanup]
}
|
||||
79
terraform/modules/k8s_workload/ingress_gateway.tf
Normal file
79
terraform/modules/k8s_workload/ingress_gateway.tf
Normal file
@@ -0,0 +1,79 @@
|
||||
# Terraform does not support gateway API so we need to use the generic
# kubernetes_manifest type instead.
# https://github.com/hashicorp/terraform-provider-kubernetes/issues/2474

resource "kubernetes_manifest" "gateway" {
  count = var.ingress_type == "gateway" ? 1 : 0

  manifest = {
    "apiVersion" = "gateway.networking.k8s.io/v1"
    "kind"       = "Gateway"
    "metadata" = {
      "name"      = "${var.cluster.name}-gateway"
      "namespace" = var.main_k8s_namespace
    }
    "spec" = {
      # External (gxlb) vs internal (rilb) GKE L7 load balancer class.
      "gatewayClassName" = var.public_ingress ? "gke-l7-gxlb" : "gke-l7-rilb"
      "listeners" = [
        {
          "name"     = "plain-http"
          "protocol" = "HTTP"
          "port"     = 80
          "allowedRoutes" = {
            "kinds" = [
              {
                "kind" = "HTTPRoute"
              }
            ]
            "namespaces" = {
              "from" = "All"
            }
          }
        }
      ]
    }
  }

  depends_on = [time_sleep.wait_service_cleanup, var.cluster]
}
|
||||
|
||||
# One HTTPRoute per demo service, attaching each service to the shared
# Gateway under the hostname <service>.<cluster>.<zone>.
resource "kubernetes_manifest" "httproute" {
  for_each = var.ingress_type == "gateway" ? { for k, v in kubernetes_service_v1.default : k => v } : {}

  manifest = {
    "apiVersion" = "gateway.networking.k8s.io/v1"
    "kind"       = "HTTPRoute"
    "metadata" = {
      "name"      = "${var.cluster.name}-${each.value.metadata[0].name}"
      "namespace" = var.main_k8s_namespace
    }
    "spec" = {
      "parentRefs" = [
        {
          "name"      = kubernetes_manifest.gateway[0].manifest.metadata.name
          "namespace" = kubernetes_manifest.gateway[0].manifest.metadata.namespace
        }
      ]
      # Zone names carry a trailing dot; strip it for a valid hostname.
      "hostnames" = [trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")]
      "rules" = [
        {
          "backendRefs" = [
            {
              "name" = each.value.metadata[0].name
              "port" = 80
            },
          ]
          "matches" = [
            {
              "path" = {
                "type"  = "PathPrefix"
                "value" = "/"
              }
            },
          ]
        }
      ]
    }
  }

  depends_on = [time_sleep.wait_service_cleanup, var.cluster]
}
|
||||
31
terraform/modules/k8s_workload/ingress_gce.tf
Normal file
31
terraform/modules/k8s_workload/ingress_gce.tf
Normal file
@@ -0,0 +1,31 @@
|
||||
# One GCE Ingress per demo service (only when ingress_type == "gce"),
# exposing each service at <service>.<cluster>.<zone> on "/".
resource "kubernetes_ingress_v1" "ingress_gce" {
  for_each = var.ingress_type == "gce" ? { for k, v in kubernetes_service_v1.default : k => v } : {}

  metadata {
    name = "${var.cluster.name}-${each.value.metadata[0].name}"
    annotations = {
      # "gce" provisions an external L7 LB, "gce-internal" an internal one.
      "kubernetes.io/ingress.class" = var.public_ingress ? "gce" : "gce-internal"
    }
  }

  spec {
    rule {
      host = trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")
      http {
        path {
          path = "/"
          backend {
            service {
              name = each.value.metadata[0].name
              port {
                number = 80
              }
            }
          }
        }
      }
    }
  }

  depends_on = [time_sleep.wait_service_cleanup]
}
|
||||
45
terraform/modules/k8s_workload/ingress_nginx.tf
Normal file
45
terraform/modules/k8s_workload/ingress_nginx.tf
Normal file
@@ -0,0 +1,45 @@
|
||||
# apiVersion: networking.k8s.io/v1
# kind: IngressClass
# metadata:
#   name: nginx-public
#   annotations:
#     ingressclass.kubernetes.io/is-default-class: "true"
# spec:
#   controller: k8s.io/ingress-nginx

# Install the nginx ingress controller only when it is the selected
# ingress implementation.
module "nginx_ingress_controller" {
  count  = var.ingress_type == "nginx" ? 1 : 0
  source = "../nginx_ingress_controller"
}
|
||||
|
||||
# One nginx Ingress per demo service (only when ingress_type == "nginx").
resource "kubernetes_ingress_v1" "ingress_nginx" {
  for_each = var.ingress_type == "nginx" ? { for k, v in kubernetes_service_v1.default : k => v } : {}

  metadata {
    name = "${var.cluster.name}-${each.value.metadata[0].name}"
    annotations = {
      # BUG FIX: this previously set the class to "gce"/"gce-internal"
      # (copy-pasted from the GCE ingress resource), so the nginx controller
      # (which watches class "nginx") never picked these Ingresses up — and
      # the GCE controller addon is disabled when ingress_type == "nginx"
      # (see the cluster module's addons_config), leaving them unserved.
      # NOTE(review): public vs internal exposure for nginx is determined by
      # the controller's Service, not this annotation — confirm the
      # nginx_ingress_controller module honors var.public_ingress.
      "kubernetes.io/ingress.class" = "nginx"
    }
  }

  spec {
    rule {
      host = trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")
      http {
        path {
          path = "/"
          backend {
            service {
              name = each.value.metadata[0].name
              port {
                number = 80
              }
            }
          }
        }
      }
    }
  }

  depends_on = [time_sleep.wait_service_cleanup]
}
|
||||
143
terraform/modules/k8s_workload/main.tf
Normal file
143
terraform/modules/k8s_workload/main.tf
Normal file
@@ -0,0 +1,143 @@
|
||||
variable "project" {
  type = string
}

variable "region" {
  type = string
}

# google_container_cluster resource object; untyped so the whole resource
# can be passed in from the cluster module.
variable "cluster" {
}

# google_container_node_pool resource object, used for ordering.
variable "node_pool" {
}

# google_dns_managed_zone resource object workload records are published into.
variable "dns_managed_zone" {
}

variable "public_ingress" {
  description = "Set to true to make the kubernetes ingresses exposed to the public internet."
  type        = bool
}

variable "ingress_type" {
  description = "What controller should we use to handle incoming http(s) connections."
  type        = string
}

variable "main_k8s_namespace" {
  type = string
}
|
||||
|
||||
# Provide time for Service cleanup: on destroy, wait so cloud load balancer
# resources created for Services can be released before the cluster goes away.
resource "time_sleep" "wait_service_cleanup" {
  depends_on = [var.cluster]

  destroy_duration = "180s"
}
|
||||
|
||||
|
||||
# Twelve copies of the hello-app demo deployment (deployment1..deployment12),
# each with 2 replicas, to generate realistic Pod IP consumption.
resource "kubernetes_deployment_v1" "default" {
  count = 12

  metadata {
    name = "deployment${count.index + 1}"
  }

  spec {
    replicas = 2

    selector {
      match_labels = {
        app = "hello-app-${count.index + 1}"
      }
    }

    template {
      metadata {
        labels = {
          app = "hello-app-${count.index + 1}"
        }
      }

      spec {
        container {
          image = "us-docker.pkg.dev/google-samples/containers/gke/hello-app:2.0"
          name  = "hello-app-container"

          port {
            container_port = 8080
            name           = "hello-app-svc"
          }

          security_context {
            allow_privilege_escalation = false
            privileged                 = false
            read_only_root_filesystem  = false

            capabilities {
              add  = []
              drop = ["NET_RAW"]
            }
          }

          liveness_probe {
            http_get {
              path = "/"
              port = "hello-app-svc"
            }

            initial_delay_seconds = 3
            period_seconds        = 3
          }
        }

        security_context {
          run_as_non_root = true

          seccomp_profile {
            type = "RuntimeDefault"
          }
        }

        # Toleration is currently required to prevent perpetual diff:
        # https://github.com/hashicorp/terraform-provider-kubernetes/pull/2380
        toleration {
          effect   = "NoSchedule"
          key      = "kubernetes.io/arch"
          operator = "Equal"
          value    = "amd64"
        }
      }
    }
  }

  depends_on = [var.node_pool]
}
|
||||
|
||||
# One ClusterIP service per demo deployment (service1..service12), each
# forwarding port 80 to the deployment's named container port.
resource "kubernetes_service_v1" "default" {
  count = 12

  metadata {
    name = "service${count.index + 1}"
    annotations = {
      # TODO: Revisit this, is this needed with the gateway API?
      "networking.gke.io/load-balancer-type" = "Internal" # Remove to create an external loadbalancer
    }
  }

  spec {
    selector = {
      app = kubernetes_deployment_v1.default[count.index].spec[0].selector[0].match_labels.app
    }

    ip_family_policy = "SingleStack"

    port {
      port        = 80
      target_port = kubernetes_deployment_v1.default[count.index].spec[0].template[0].spec[0].container[0].port[0].name
    }

    type = "ClusterIP"
  }

  depends_on = [var.node_pool, time_sleep.wait_service_cleanup]
}
|
||||
File diff suppressed because it is too large
Load Diff
27
terraform/modules/nginx_ingress_controller/main.tf
Normal file
27
terraform/modules/nginx_ingress_controller/main.tf
Normal file
@@ -0,0 +1,27 @@
|
||||
terraform {
  required_providers {
    # The kubernetes provider configuration itself is inherited from the
    # calling module; only the version constraint is pinned here.
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.36.0"
    }
  }
}
|
||||
|
||||
data "google_client_config" "default" {}

# Identity (email) of the credentials terraform is running as.
data "google_client_openid_userinfo" "me" {}

# Grant cluster-admin to the identity running terraform so it can create the
# RBAC resources the nginx ingress controller needs.
resource "kubernetes_cluster_role_binding" "cluster_admin_binding" {
  metadata {
    name = "cluster-admin-binding"
  }

  subject {
    kind = "User"
    # BUG FIX: this previously used data.google_client_config.default.id,
    # which is the provider-configuration composite id (project/region/zone),
    # not a user identity — the binding matched no real user. GKE identifies
    # users by their Google account email.
    name = data.google_client_openid_userinfo.me.email
  }

  role_ref {
    api_group = "rbac.authorization.k8s.io"
    kind      = "ClusterRole"
    name      = "cluster-admin"
  }
}
|
||||
Reference in New Issue
Block a user