diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..53752db --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +output diff --git a/README.md b/README.md new file mode 100644 index 0000000..180cf75 --- /dev/null +++ b/README.md @@ -0,0 +1,317 @@ +REF https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing +REF Services only available within the cluster: https://cloud.google.com/kubernetes-engine/docs/how-to/alias-ips +REF https://wdenniss.com/gke-network-planning +REF https://cloud.google.com/blog/products/containers-kubernetes/best-practices-for-kubernetes-pod-ip-allocation-in-gke + +REF SHARE IP: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#terraform +REF GATEWAY: https://github.com/GoogleCloudPlatform/gke-networking-recipes/tree/main/gateway/single-cluster/regional-l7-ilb +REF node NAT: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent + +REF "GKE networking model doesn't allow IP addresses to be reused across the network. When you migrate to GKE, you must plan your IP address allocation to Reduce internal IP address usage in GKE." : https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview + +REF "Combining multiple Ingress resources into a single Google Cloud load balancer is not supported." : https://cloud.google.com/kubernetes-engine/docs/concepts/ingress + +GKE IP Address Usage Demo +========================= + +This repo contains a terraform configuration that demonstrates efficient use of RFC-1918 IP addresses with GKE kubernetes clusters. **IT IS NOT** meant to be an example of best practices (for example, in real use I would use [flux](https://github.com/fluxcd/flux2) to apply kubernetes manifests instead of terraform, I would use Horizontal Pod Autoscaling, and I would use node pool autoscaling) but rather a contrived example of nearly minimal RFC-1918 IP address consumption. + +TL;DR +----- +- Service IP addresses are not accessible outside a cluster (TODO REF) +- Pod IP addresses by default are not accessible outside a cluster (TODO REF) +- Therefore, we can use (TODO ranges) for pods and services (TODO REF) +- This is recommended by Google (TODO REF) + +What is spun up +--------------- + +The terraform configuration spins up: +- A Compute Engine virtual machine for you to use to test `gce-internal` ingresses. +- Cloud DNS for your (sub)domain +- [ExternalDNS](https://github.com/kubernetes-sigs/external-dns) for automatically creating DNS records for each ingress +- 14 clusters + +And on each cluster it spins up: +- 2 nodes +- 1 gateway +- 12 HTTP Routes +- 12 services +- 24 pods + +For a grand total of: +- 28 nodes (and 1 user machine) +- 14 gateways +- 168 HTTP Routes (and 168 subdomains) +- 168 services +- 336 pods + +All of this while only using `10.10.10.0/26` from the RFC-1918 space (64 addresses). + +What do I need to provide +------------------------- + +To use the terraform configuration, you will need: + +1. An already existing Google Cloud project (there is no need to set anything up in the project, terraform will handle all of that, but this configuration does not create a project for you). +2. `gcloud` authenticated with an account that has access to that project (a quick check is shown below). +3. A (sub)domain that can have its nameservers pointed at Google Cloud DNS. 
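+ +Before running anything, you can sanity-check the first two prerequisites from your workstation (a minimal sketch; `my-project-id` is a placeholder for your existing project's ID): + +``` +# Confirm gcloud has credentials and can see the target project. +gcloud auth list +gcloud projects describe my-project-id --format='value(projectId,lifecycleState)' +``` 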
+ +IP Address Allocations +====================== + + +REF: https://cloud.google.com/kubernetes-engine/docs/concepts/alias-ips#cluster_sizing_secondary_range_pods +REF: https://cloud.google.com/vpc/docs/subnets#valid-ranges + + +| Purpose | CIDR | Notes | +|-------------------|----------------|---------------------------------------------------------------------------------------------| +| Node IP range | 10.10.10.0/26 | 1 address per node, 1 address per gateway, 1 address per cluster (cluster private endpoint) | +| Service IP range | 100.64.0.0/19 | Secondary range on the subnet. Not routable from outside the cluster. | +| Pod IP range | 240.10.0.0/17 | Secondary range on the subnet. Each node grabs a `/24` from this range. | +| Envoy Proxy range | 100.64.96.0/24 | This is used by the GKE ingress controller. Consumes a `/24` per network. | + + +What consumes RFC-1918 IP addresses +----------------------------------- + +| Thing | Quantity Consumed | Notes | +|-------------------------------------------------------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------| +| [Unusable addresses](https://cloud.google.com/vpc/docs/subnets#unusable-ip-addresses-in-every-subnet) | 4 addresses | The first two and last two addresses of a primary IP range are unusable. | +| Each Node | 1 address | This example uses 2 nodes per cluster to make it numerically distinct from the quantity of clusters. | +| The user-machine virtual machine | 1 address | This is not needed in a production deploy. | +| Each Gateway | 1 address | This can be 1 per cluster. | +| The control plane private endpoint | 1 address | 1 per cluster. | + +With our 64 addresses from `10.10.10.0/26`, we lose 4 as unusable addresses and use 1 for the user machine, leaving 59. Each cluster then needs 4 addresses (2 nodes, 1 gateway, 1 control plane private endpoint), so we can fit 14 clusters (56 addresses) with 3 IP addresses left over. + + +Usage +===== + +To apply the terraform, authenticate with the gcloud CLI tool: + +``` +gcloud auth application-default login +``` + +Then go into the `terraform` folder and apply the configuration. We need to apply the config in two phases via the `cluster_exists` variable because the kubernetes terraform provider does not have native support for the Gateway API and the `kubernetes_manifest` terraform resource [has a shortcoming that requires the cluster to exist at plan time](https://github.com/hashicorp/terraform-provider-kubernetes/issues/1775). + +``` +terraform apply -var dns_root="k8sdemo.mydomain.example." -var quota_email="MrManager@mydomain.example" -var quota_justification="Explain why you need quotas increased here." -var cluster_exists=false +terraform apply -var dns_root="k8sdemo.mydomain.example." -var quota_email="MrManager@mydomain.example" -var quota_justification="Explain why you need quotas increased here." -var cluster_exists=true +``` + +Please note that this will exceed the default quotas on new Google Cloud projects. The terraform configuration will automatically put in requests for quota increases, but those can take multiple days to be approved or denied; until then, you should be able to fit 3 clusters within the default quotas. + +Please note that the kubernetes clusters will take a couple of extra minutes to get fully set up and running after the `terraform apply` command has finished. During this time, the clusters are getting IP addresses assigned to `Gateway` objects and DNS records created via `ExternalDNS`. + +This will spin up the kubernetes clusters and output some helpful information. One such piece of information is the list of nameservers for Google Cloud DNS. We need to point our (sub)domain at those nameservers. 
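+ +Once the NS records are published (a PowerDNS example follows below), you can confirm the delegation is live; a quick check, substituting your real (sub)domain for the placeholder: + +``` +# Should list the ns-cloud-*.googledomains.com. servers that terraform output. +dig NS k8sdemo.mydomain.example +short +``` 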
If you want to get the list of nameservers again without having to wait for `terraform apply`, you can run `terraform output dns_name_servers`. + +Personally, I run [PowerDNS](https://github.com/PowerDNS/pdns), so as an example, I would first clear the old `NS` records from previous runs from `k8sdemo.mydomain.example` (if you are setting this up for the first time you can skip this step): + +``` +pdnsutil delete-rrset mydomain.example k8sdemo NS +``` + +And then I'd add the new records (naturally you should use the nameservers output by `terraform`; they will change each time you add the domain to Cloud DNS): + +``` +pdnsutil add-record mydomain.example k8sdemo NS 600 ns-cloud-a1.googledomains.com. +pdnsutil add-record mydomain.example k8sdemo NS 600 ns-cloud-a2.googledomains.com. +pdnsutil add-record mydomain.example k8sdemo NS 600 ns-cloud-a3.googledomains.com. +pdnsutil add-record mydomain.example k8sdemo NS 600 ns-cloud-a4.googledomains.com. +``` + +Give some time for DNS caches to expire and then you should be able to access `service.cluster.k8sdemo.mydomain.example` by connecting to the `user-machine` over `ssh` and using `curl` to hit the internal ingresses. First, get the `gcloud` command to `ssh` into the `user-machine`: + +``` +terraform output user_machine_ssh_command +``` + +Then `ssh` into the machine (your command will be different): + +``` +gcloud compute ssh --zone 'us-central1-c' 'user-machine' --project 'k8s-ip-demo-1aa0405a' +``` + +and hit the various ingresses on the various clusters: + +``` +curl service1.cluster1.k8sdemo.mydomain.example +``` + +Clean Up +======== +Just like we did a 2-stage apply by toggling the `cluster_exists` variable, we will need to do a 2-stage destroy. First we tear down any kubernetes resources by running *apply* with the `cluster_exists` variable set to `false`. Then we can destroy the entire project. + +``` +terraform apply -var dns_root="k8sdemo.mydomain.example." -var quota_email="MrManager@mydomain.example" -var quota_justification="Explain why you need quotas increased here." -var cluster_exists=false +terraform destroy -var dns_root="k8sdemo.mydomain.example." -var quota_email="MrManager@mydomain.example" -var quota_justification="Explain why you need quotas increased here." -var cluster_exists=false +``` + +Explanation +=========== + +To conserve the RFC-1918 address space, we need to take advantage of two facts: + +1. Service IP addresses aren't real +2. Pod IP addresses do not need to leave the cluster (and by default they do not on GKE) + +Service IP Addresses +-------------------- + +Service IP addresses are a fiction created by kubernetes: they are not routable from outside the cluster, and packets to service IP addresses are never written to the wire. When a pod sends a packet to a service IP address, it is intercepted by iptables rules, which perform DNAT to a pod IP address. We can see this on our GKE cluster by connecting to the compute engine instance for a node over `ssh` and inspecting its iptables rules. 
+ +```bash +gcloud compute ssh --zone 'us-central1-f' 'gke-cluster1-cluster1-pool-9d7804fe-fl8w' --project 'k8s-ip-demo-90bdaee2' +``` + +First, we look at the `PREROUTING` chain: +``` +$ sudo /sbin/iptables --table nat --list PREROUTING +Chain PREROUTING (policy ACCEPT) +target prot opt source destination +KUBE-SERVICES all -- anywhere anywhere /* kubernetes service portals */ +DNAT tcp -- anywhere metadata.google.internal tcp dpt:http-alt /* metadata-concealment: bridge traffic to metadata server goes to metadata proxy */ to:169.254.169.252:987 +DNAT tcp -- anywhere metadata.google.internal tcp dpt:http /* metadata-concealment: bridge traffic to metadata server goes to metadata proxy */ to:169.254.169.252:988 +``` + +The first rule sends all our traffic to the `KUBE-SERVICES` chain: + +``` +$ sudo /sbin/iptables --table nat --list KUBE-SERVICES +Chain KUBE-SERVICES (2 references) +target prot opt source destination +KUBE-SVC-XBBXYMVKK37OV7LG tcp -- anywhere 100.64.28.70 /* gmp-system/gmp-operator:webhook cluster IP */ tcp dpt:https +KUBE-SVC-GQKLSXF4KTGNIMSQ tcp -- anywhere 100.64.28.107 /* default/service11 cluster IP */ tcp dpt:http +KUBE-SVC-AI5DROXYLCYX27ZS tcp -- anywhere 100.64.11.22 /* default/service5 cluster IP */ tcp dpt:http +KUBE-SVC-F4AADAVBSY5MPKOB tcp -- anywhere 100.64.12.233 /* default/service6 cluster IP */ tcp dpt:http +KUBE-SVC-NPX46M4PTMTKRN6Y tcp -- anywhere 100.64.0.1 /* default/kubernetes:https cluster IP */ tcp dpt:https +KUBE-SVC-XP4WJ6VSLGWALMW5 tcp -- anywhere 100.64.25.226 /* kube-system/default-http-backend:http cluster IP */ tcp dpt:http +KUBE-SVC-TCOU7JCQXEZGVUNU udp -- anywhere 100.64.0.10 /* kube-system/kube-dns:dns cluster IP */ udp dpt:domain +KUBE-SVC-QMWWTXBG7KFJQKLO tcp -- anywhere 100.64.7.174 /* kube-system/metrics-server cluster IP */ tcp dpt:https +KUBE-SVC-3ISFTUHJIYANB2XG tcp -- anywhere 100.64.9.63 /* default/service4 cluster IP */ tcp dpt:http +KUBE-SVC-T467R3VJHOQP3KAJ tcp -- anywhere 100.64.8.240 /* default/service9 cluster IP */ tcp dpt:http +KUBE-SVC-ERIFXISQEP7F7OF4 tcp -- anywhere 100.64.0.10 /* kube-system/kube-dns:dns-tcp cluster IP */ tcp dpt:domain +KUBE-SVC-JOVDIF256A6Q5HDW tcp -- anywhere 100.64.16.250 /* default/service8 cluster IP */ tcp dpt:http +KUBE-SVC-E7SFLZD2Y2FAKTSV tcp -- anywhere 100.64.16.205 /* default/service2 cluster IP */ tcp dpt:http +KUBE-SVC-OA62VCLUSJYXZDQQ tcp -- anywhere 100.64.16.149 /* default/service10 cluster IP */ tcp dpt:http +KUBE-SVC-SAREEPXIBVBCS5LQ tcp -- anywhere 100.64.8.122 /* default/service12 cluster IP */ tcp dpt:http +KUBE-SVC-MVJGFDRMC5WIL772 tcp -- anywhere 100.64.6.210 /* default/service7 cluster IP */ tcp dpt:http +KUBE-SVC-4RM6KDP54NYR4K6S tcp -- anywhere 100.64.22.23 /* default/service1 cluster IP */ tcp dpt:http +KUBE-SVC-Y7ZLLRVMCD5M4HRL tcp -- anywhere 100.64.12.22 /* default/service3 cluster IP */ tcp dpt:http +KUBE-NODEPORTS all -- anywhere anywhere /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL +``` + +This matches packets destined for each service IP address and sends them to their respective chains. For `service1` it is matching packets destined for `100.64.22.23`. 
That happens to be our service IP address for `service1`: +``` +$ kubectl --kubeconfig /bridge/git/kubernetes_ip_demo/output/kubeconfig/cluster1.yaml get svc service1 +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +service1 ClusterIP 100.64.22.23 <none> 80/TCP 34m +``` + +So it's matching packets destined for `service1` and sending them to `KUBE-SVC-4RM6KDP54NYR4K6S`: + +``` +$ sudo /sbin/iptables --table nat --list KUBE-SVC-4RM6KDP54NYR4K6S +Chain KUBE-SVC-4RM6KDP54NYR4K6S (1 references) +target prot opt source destination +KUBE-MARK-MASQ tcp -- !240.10.0.0/24 100.64.22.23 /* default/service1 cluster IP */ tcp dpt:http +KUBE-SEP-XCTUYJ3QDWA727EN all -- anywhere anywhere /* default/service1 -> 240.10.0.24:8080 */ statistic mode random probability 0.50000000000 +KUBE-SEP-5LQWHS2W6LUXXNGL all -- anywhere anywhere /* default/service1 -> 240.10.0.25:8080 */ +``` + +This is how kubernetes load balances services: it uses `iptables` on the machine opening the connection to randomly distribute connections to the various pods. If we take a look at the chain for the first pod: + +``` +$ sudo /sbin/iptables --table nat --list KUBE-SEP-XCTUYJ3QDWA727EN +Chain KUBE-SEP-XCTUYJ3QDWA727EN (1 references) +target prot opt source destination +KUBE-MARK-MASQ all -- 240.10.0.24 anywhere /* default/service1 */ +DNAT tcp -- anywhere anywhere /* default/service1 */ tcp to:240.10.0.24:8080 +``` + +This corresponds to one of our pod IP addresses: +``` +$ kubectl --kubeconfig /bridge/git/kubernetes_ip_demo/output/kubeconfig/cluster1.yaml get pods -l 'app=hello-app-1' -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +deployment1-69bddf99b6-gjl94 1/1 Running 0 55m 240.10.0.24 gke-cluster1-cluster1-pool-9d7804fe-fl8w <none> <none> +deployment1-69bddf99b6-vrtc7 1/1 Running 0 55m 240.10.0.25 gke-cluster1-cluster1-pool-9d7804fe-fl8w <none> <none> +``` + +If we had launched a routes-based cluster instead of a VPC-native cluster, then the service IP addresses in `KUBE-SERVICES` would instead be drawn from the same RFC-1918 space as the node network: +``` +$ sudo /sbin/iptables --table nat --list KUBE-SERVICES +Chain KUBE-SERVICES (2 references) +target prot opt source destination +KUBE-SVC-NPX46M4PTMTKRN6Y tcp -- anywhere 10.107.240.1 /* default/kubernetes:https cluster IP */ tcp dpt:https +KUBE-SVC-Y7ZLLRVMCD5M4HRL tcp -- anywhere 10.107.245.254 /* default/service3 cluster IP */ tcp dpt:http +KUBE-SVC-OA62VCLUSJYXZDQQ tcp -- anywhere 10.107.250.149 /* default/service10 cluster IP */ tcp dpt:http +KUBE-SVC-JOVDIF256A6Q5HDW tcp -- anywhere 10.107.250.156 /* default/service8 cluster IP */ tcp dpt:http +KUBE-SVC-4RM6KDP54NYR4K6S tcp -- anywhere 10.107.250.111 /* default/service1 cluster IP */ tcp dpt:http +KUBE-SVC-3ISFTUHJIYANB2XG tcp -- anywhere 10.107.241.148 /* default/service4 cluster IP */ tcp dpt:http +KUBE-SVC-E7SFLZD2Y2FAKTSV tcp -- anywhere 10.107.255.251 /* default/service2 cluster IP */ tcp dpt:http +KUBE-SVC-T467R3VJHOQP3KAJ tcp -- anywhere 10.107.246.240 /* default/service9 cluster IP */ tcp dpt:http +KUBE-SVC-AI5DROXYLCYX27ZS tcp -- anywhere 10.107.253.168 /* default/service5 cluster IP */ tcp dpt:http +KUBE-SVC-GQKLSXF4KTGNIMSQ tcp -- anywhere 10.107.255.31 /* default/service11 cluster IP */ tcp dpt:http +KUBE-SVC-XP4WJ6VSLGWALMW5 tcp -- anywhere 10.107.252.203 /* kube-system/default-http-backend:http cluster IP */ tcp dpt:http +KUBE-SVC-SAREEPXIBVBCS5LQ tcp -- anywhere 10.107.249.4 /* default/service12 cluster IP */ tcp dpt:http +KUBE-SVC-F4AADAVBSY5MPKOB tcp -- anywhere 10.107.250.177 /* default/service6 cluster IP */ tcp dpt:http +KUBE-SVC-MVJGFDRMC5WIL772 tcp -- 
anywhere 10.107.252.157 /* default/service7 cluster IP */ tcp dpt:http +KUBE-NODEPORTS all -- anywhere anywhere /* kubernetes service nodeports; NOTE: this must be the last rule in this chain */ ADDRTYPE match dst-type LOCAL +``` + +But regardless, the end result is the same: service IP addresses aren't real, so they can be anything. Despite their fictional nature, Google uses a "flat" network architecture that does not allow reusing IP addresses across multiple clusters, so Google recommends using (TODO ip range) for service IP ranges. + +Pod IP Addresses +---------------- + +In the previous section we saw how sending a packet to `service1` results in iptables intercepting that packet and rewriting the destination to a pod IP address. In a VPC-native GKE cluster, each node has a virtual network interface for each pod: + +``` +$ netstat -4nr +Kernel IP routing table +Destination Gateway Genmask Flags MSS Window irtt Iface +0.0.0.0 10.10.10.1 0.0.0.0 UG 0 0 0 eth0 +10.10.10.1 0.0.0.0 255.255.255.255 UH 0 0 0 eth0 +169.254.123.0 0.0.0.0 255.255.255.0 U 0 0 0 docker0 +169.254.169.254 10.10.10.1 255.255.255.255 UGH 0 0 0 eth0 +240.10.0.2 0.0.0.0 255.255.255.255 UH 0 0 0 gke200305c2a96 +240.10.0.3 0.0.0.0 255.255.255.255 UH 0 0 0 gke026d556ebe9 +240.10.0.4 0.0.0.0 255.255.255.255 UH 0 0 0 gke7d4f3a7a7fe +240.10.0.5 0.0.0.0 255.255.255.255 UH 0 0 0 gke60f18655088 +240.10.0.6 0.0.0.0 255.255.255.255 UH 0 0 0 gke1a72a682490 +240.10.0.8 0.0.0.0 255.255.255.255 UH 0 0 0 gke1c4d51adb0d +240.10.0.9 0.0.0.0 255.255.255.255 UH 0 0 0 gke9d25513aa8f +240.10.0.10 0.0.0.0 255.255.255.255 UH 0 0 0 gke6a364803b2a +240.10.0.12 0.0.0.0 255.255.255.255 UH 0 0 0 gke6a63d89ef86 +240.10.0.13 0.0.0.0 255.255.255.255 UH 0 0 0 gke35b91a8a487 +240.10.0.14 0.0.0.0 255.255.255.255 UH 0 0 0 gke96c13f51f03 +240.10.0.15 0.0.0.0 255.255.255.255 UH 0 0 0 gke84a95b2f8d9 +240.10.0.16 0.0.0.0 255.255.255.255 UH 0 0 0 gkec88ce3d8bdb +240.10.0.17 0.0.0.0 255.255.255.255 UH 0 0 0 gkeacb4e0652ac +240.10.0.18 0.0.0.0 255.255.255.255 UH 0 0 0 gke49bb9e75be2 +240.10.0.19 0.0.0.0 255.255.255.255 UH 0 0 0 gke0ece9ad356b +240.10.0.20 0.0.0.0 255.255.255.255 UH 0 0 0 gke0a1351c4ee3 +240.10.0.21 0.0.0.0 255.255.255.255 UH 0 0 0 gke72a06fc23ca +240.10.0.22 0.0.0.0 255.255.255.255 UH 0 0 0 gke9845db36eb5 +240.10.0.23 0.0.0.0 255.255.255.255 UH 0 0 0 gkecb6bf7230eb +240.10.0.24 0.0.0.0 255.255.255.255 UH 0 0 0 gke7dae60021d4 +240.10.0.25 0.0.0.0 255.255.255.255 UH 0 0 0 gkeb8396784860 +240.10.0.26 0.0.0.0 255.255.255.255 UH 0 0 0 gke4bd6d44f52d +240.10.0.27 0.0.0.0 255.255.255.255 UH 0 0 0 gke3adcfdc91bc +240.10.0.28 0.0.0.0 255.255.255.255 UH 0 0 0 gkefabe3212dac +240.10.0.29 0.0.0.0 255.255.255.255 UH 0 0 0 gke0f41cfda23e +240.10.0.30 0.0.0.0 255.255.255.255 UH 0 0 0 gke91fc0947c42 +240.10.0.31 0.0.0.0 255.255.255.255 UH 0 0 0 gke9ee620217b1 +240.10.0.32 0.0.0.0 255.255.255.255 UH 0 0 0 gke12336532836 +240.10.0.33 0.0.0.0 255.255.255.255 UH 0 0 0 gke369d5150571 +240.10.0.34 0.0.0.0 255.255.255.255 UH 0 0 0 gke97dfb4bceed +240.10.0.35 0.0.0.0 255.255.255.255 UH 0 0 0 gke085b5ff7d93 +``` + +We can see that the pod IP addresses for `service1`, `240.10.0.24` and `240.10.0.25`, would route over `gke7dae60021d4` and `gkeb8396784860` respectively. At that point, Google's infrastructure takes over delivering the packet. 
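+ +One way to watch the DNAT rewrite described above is the node's connection-tracking table. This is a sketch rather than part of the demo: it assumes the `conntrack` tool is available on the node (on Container-Optimized OS you may need to run it inside `toolbox`) and reuses the `service1` cluster IP from the output above: + +``` +# Generate some traffic first, e.g. run: curl 100.64.22.23 from any pod. +# Then list conntrack entries whose original destination is the service IP: +sudo conntrack -L -d 100.64.22.23 +# Each entry pairs the original tuple (dst=100.64.22.23, the service IP) with +# a reply tuple whose source is a pod IP such as 240.10.0.24, showing that the +# service IP never appears as a packet destination past the node. +``` 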
diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000..3fa8c86 --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1 @@ +.terraform diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..09bafa3 --- /dev/null +++ b/terraform/.terraform.lock.hcl @@ -0,0 +1,119 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/hashicorp/google" { + version = "6.21.0" + constraints = "6.21.0" + hashes = [ + "h1:pZhpGdzOtzGkX38PIYbXWilwA/LtWXQ22dkt6Fh2DAQ=", + "zh:1c2462367d92f6f8f6c527115905f7cca78e48cf5d5bc7448d3beeb7c9e895eb", + "zh:3644dbd09c3740e6d843e035de34a74ed41ffc32e7ed04a19aecddc4c57334cc", + "zh:3a586bbb9a9c6463c975a94ddd4671f2a84992a2c169bfb2f5053c2cea55849c", + "zh:4ae96672e6a52a077760a11c95946ec9d3f84f7ab84c0ba3c0cb66c3d3580d59", + "zh:9c26b3dbc1f9a594d1d07b6a25ce089f8463e8331324f4ecde73829e9d1d5ee5", + "zh:b99a602111d6ca5842c852ac1eff5c009f1d75492e355ea25f3dbd6e008e4d9a", + "zh:d45100c41c940c35c07fae2876f6cc654328d405077f01d268e8bd5a25b56c30", + "zh:de6e14e85a9ea2322a4fd971fde3b71071e7b6435a12dbcd3b8c5f42765e8b3c", + "zh:e22f6b54cfebb0c1a0991d83adc83b3d454ba6d9b5c21574af135799b488ed66", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + "zh:f6964268874a15788dfed47a26cb880718f47c94cba7c8a0284b70921fec807b", + "zh:ff51b3e83149798ce6c7545688fe3b9703b04d5c0376cd55215a93767144f40e", + ] +} + +provider "registry.terraform.io/hashicorp/google-beta" { + version = "6.21.0" + constraints = "6.21.0" + hashes = [ + "h1:mkoeBFeL2wpUgx11KGu36MWSqH0UthZZKr0PjZ40HG0=", + "zh:13945569f2f0859199f21d74395f4263ec576572db29fc7ab0c6af7b2c7611e7", + "zh:459e5114343509144397f7114a15e5eb4435e786fe4ab7a1d8809a3def0364f6", + "zh:45a363a8f31bfe3b238230949f568b0f591e6f5bebad839bcd13cd5b937ff6df", + "zh:86a8b26a4fd45da6561f87b6b01bc5d41ffe0dd05d285f144accc7c97a16a1f3", + "zh:aadd5a8828c87f482cf551224cc3ecfaa38d9ba8d6da54850a9dcdb24ffbab3a", + "zh:ae16ed6f8b971de85b28bf040c60e72dcd0d310f86288ad8cc52161c2208b461", + "zh:bc6c0f0147b78e103cd086acc29b18110ef3f84f970ea0291064c6b3552c133a", + "zh:bc796494f601caf538a83662c13fa7f43d118572ef6666bd1e163f8f17ce6b0e", + "zh:bce97850c2855eee3b8f94fa540bbe2ad4fe75ada841aa8e672140bb7d179dda", + "zh:bd7420d1c03cc72730e4b718d184ee769dc3dd4247606751b567a5ac416705a0", + "zh:f157138eecd0fdb1080994641521c50c7ab8fff0a5f3753f07915a7475e2c7fd", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + ] +} + +provider "registry.terraform.io/hashicorp/kubernetes" { + version = "2.36.0" + hashes = [ + "h1:vdY0sxo7ahwuz/y7flXTE04tSwn0Zhxyg6n62aTmAHI=", + "zh:07f38fcb7578984a3e2c8cf0397c880f6b3eb2a722a120a08a634a607ea495ca", + "zh:1adde61769c50dbb799d8bf8bfd5c8c504a37017dfd06c7820f82bcf44ca0d39", + "zh:39707f23ab58fd0e686967c0f973c0f5a39c14d6ccfc757f97c345fdd0cd4624", + "zh:4cc3dc2b5d06cc22d1c734f7162b0a8fdc61990ff9efb64e59412d65a7ccc92a", + "zh:8382dcb82ba7303715b5e67939e07dd1c8ecddbe01d12f39b82b2b7d7357e1d9", + "zh:88e8e4f90034186b8bfdea1b8d394621cbc46a064ff2418027e6dba6807d5227", + "zh:a6276a75ad170f76d88263fdb5f9558998bf3a3f7650d7bd3387b396410e59f3", + "zh:bc816c7e0606e5df98a0c7634b240bb0c8100c3107b8b17b554af702edc6a0c5", + "zh:cb2f31d58f37020e840af52755c18afd1f09a833c4903ac59270ab440fab57b7", + "zh:ee0d103b8d0089fb1918311683110b4492a9346f0471b136af46d3b019576b22", + "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", + 
"zh:f688b9ec761721e401f6859c19c083e3be20a650426f4747cd359cdc079d212a", + ] +} + +provider "registry.terraform.io/hashicorp/local" { + version = "2.5.2" + hashes = [ + "h1:JlMZD6nYqJ8sSrFfEAH0Vk/SL8WLZRmFaMUF9PJK5wM=", + "zh:136299545178ce281c56f36965bf91c35407c11897f7082b3b983d86cb79b511", + "zh:3b4486858aa9cb8163378722b642c57c529b6c64bfbfc9461d940a84cd66ebea", + "zh:4855ee628ead847741aa4f4fc9bed50cfdbf197f2912775dd9fe7bc43fa077c0", + "zh:4b8cd2583d1edcac4011caafe8afb7a95e8110a607a1d5fb87d921178074a69b", + "zh:52084ddaff8c8cd3f9e7bcb7ce4dc1eab00602912c96da43c29b4762dc376038", + "zh:71562d330d3f92d79b2952ffdda0dad167e952e46200c767dd30c6af8d7c0ed3", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:805f81ade06ff68fa8b908d31892eaed5c180ae031c77ad35f82cb7a74b97cf4", + "zh:8b6b3ebeaaa8e38dd04e56996abe80db9be6f4c1df75ac3cccc77642899bd464", + "zh:ad07750576b99248037b897de71113cc19b1a8d0bc235eb99173cc83d0de3b1b", + "zh:b9f1c3bfadb74068f5c205292badb0661e17ac05eb23bfe8bd809691e4583d0e", + "zh:cc4cbcd67414fefb111c1bf7ab0bc4beb8c0b553d01719ad17de9a047adff4d1", + ] +} + +provider "registry.terraform.io/hashicorp/random" { + version = "3.6.2" + constraints = "3.6.2" + hashes = [ + "h1:wmG0QFjQ2OfyPy6BB7mQ57WtoZZGGV07uAPQeDmIrAE=", + "zh:0ef01a4f81147b32c1bea3429974d4d104bbc4be2ba3cfa667031a8183ef88ec", + "zh:1bcd2d8161e89e39886119965ef0f37fcce2da9c1aca34263dd3002ba05fcb53", + "zh:37c75d15e9514556a5f4ed02e1548aaa95c0ecd6ff9af1119ac905144c70c114", + "zh:4210550a767226976bc7e57d988b9ce48f4411fa8a60cd74a6b246baf7589dad", + "zh:562007382520cd4baa7320f35e1370ffe84e46ed4e2071fdc7e4b1a9b1f8ae9b", + "zh:5efb9da90f665e43f22c2e13e0ce48e86cae2d960aaf1abf721b497f32025916", + "zh:6f71257a6b1218d02a573fc9bff0657410404fb2ef23bc66ae8cd968f98d5ff6", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:9647e18f221380a85f2f0ab387c68fdafd58af6193a932417299cdcae4710150", + "zh:bb6297ce412c3c2fa9fec726114e5e0508dd2638cad6a0cb433194930c97a544", + "zh:f83e925ed73ff8a5ef6e3608ad9225baa5376446349572c2449c0c0b3cf184b7", + "zh:fbef0781cb64de76b1df1ca11078aecba7800d82fd4a956302734999cfd9a4af", + ] +} + +provider "registry.terraform.io/hashicorp/time" { + version = "0.13.0" + hashes = [ + "h1:W2XSd8unrfQsFLBCqtOZf8GywZTU7FOgAI95YmIwxQw=", + "zh:3776dd78ef3053562ccb2f8916d5d3f21a28f05e78859f0f1e4510525f891ecb", + "zh:541ca0b56f808c15d208b9396f149563b133223c4b66cdefbcfe2d8f1c23497e", + "zh:67ed315f3572eb20ce6778423b14fbb6faba3090f454bc20ec4146489b4738c0", + "zh:69dc375845bcfc451426480119f2941ee28b9ef01273d228bb66918180863b3a", + "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", + "zh:93c24b7c87b5db9721f60782ac784152599aa78b30fdea2fc9c594d46d92767c", + "zh:95441cf14312041ae0b34640ff33975c09540125b01f9131358fca50e7be239d", + "zh:a294103aeed868c58987e131357a3ec259316c937c909e8a726b862d5a227b82", + "zh:adf6ded3f2e2f318e8aebf1040bc2791b448d006af7d12f7ddc3e8d40b22047a", + "zh:b2d9c16b7acd20d3813060c4d3647dc5f40598ebbdf59f642d53d189e4e3870a", + "zh:bc76a5161e9bcf74cadd76b3d4a51de508aa0c62e7f7ae536a87cd7595d81ebf", + "zh:ce6df2c1052c60b4432cb5c0ead471d7cdb4b285b807c265328a358631fc3610", + ] +} diff --git a/terraform/cluster.tf b/terraform/cluster.tf new file mode 100644 index 0000000..78ee9cc --- /dev/null +++ b/terraform/cluster.tf @@ -0,0 +1,1461 @@ +# TODO: Rename networks +resource "google_compute_network" "default" { + project = google_project.project.project_id + name = "example-network" + depends_on = [ + google_project_service.service["compute"] + ] + + 
auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + project = google_project.project.project_id + name = "example-subnetwork" + + ip_cidr_range = "10.10.10.0/26" # Node range + region = var.region + + stack_type = "IPV4_ONLY" + + network = google_compute_network.default.id + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "100.64.0.0/19" + } + + secondary_ip_range { + # Each node will grab a /24 from this range + range_name = "pod-ranges" + ip_cidr_range = "240.10.0.0/17" + } +} + +resource "google_compute_subnetwork" "network-for-l7lb" { + project = google_project.project.project_id + name = "l7lb-subnetwork" + region = var.region + + ip_cidr_range = "100.64.96.0/24" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.default.id +} + +output "subnet_ip_address_list_url" { + description = "URL to a list of IP addresses on the subnet to see which addresses are being used." + value = "https://console.cloud.google.com/networking/addresses/list?project=${google_project.project.project_id}" +} + +resource "google_service_account" "k8s_service_account" { + project = google_project.project.project_id + account_id = "k8s-service-account" + display_name = "K8s Service Account" +} + +# ===================================================================================================================== +# count and for_each are not possible in terraform for providers so this is a gross copy+paste. +# ref: https://support.hashicorp.com/hc/en-us/articles/6304194229267-Using-count-or-for-each-in-Provider-Configuration +# ===================================================================================================================== + +module "cluster1" { + source = "./modules/cluster" + project = google_project.project.project_id + region = var.region + ssh_key = var.ssh_key + zones = toset([var.zone]) + name = "cluster1" + network_id = google_compute_network.default.id + subnetwork_id = google_compute_subnetwork.default.id + service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name + pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name + service_account_email = google_service_account.k8s_service_account.email + dns_managed_zone = google_dns_managed_zone.zone + ingress_type = var.ingress_type + public_ingress = var.public_ingress + external_dns_k8s_namespace = local.external_dns_k8s_namespace + external_dns_k8s_service_account = local.external_dns_k8s_service_account + external_dns_gcp_service_account_email = google_service_account.external_dns.email + cluster_exists = var.cluster_exists + service_container = google_project_service.service["container"] +} + +output "gke_connect_command_cluster1" { + description = "Command to run to connect to the kubernetes cluster." + value = module.cluster1.gke_connect_command +} + +output "kubectl_cluster1" { + description = "Kubectl command to access the kubernetes cluster." + value = module.cluster1.kubectl_command +} + +output "cluster_ip_address_utilization_url_cluster1" { + description = "URL to a page showing IP address utilization within the cluster." 
+ value = module.cluster1.cluster_ip_address_utilization_url +} + +# module "cluster2" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster2" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster3" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster3" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster4" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster4" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster5" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster5" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# 
pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster6" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster6" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster7" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster7" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster8" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster8" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = 
google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster9" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster9" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster10" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster10" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster11" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster11" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster12" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster12" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# 
service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster13" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster13" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster14" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster14" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster15" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster15" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# 
external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster16" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster16" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster17" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster17" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster18" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster18" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster19" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = 
"cluster19" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster20" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster20" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster21" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster21" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# public_ingress = var.public_ingress +# external_dns_k8s_namespace = local.external_dns_k8s_namespace +# external_dns_k8s_service_account = local.external_dns_k8s_service_account +# external_dns_gcp_service_account_email = google_service_account.external_dns.email +# cluster_exists = var.cluster_exists +# service_container = google_project_service.service["container"] +# } + +# module "cluster22" { +# source = "./modules/cluster" +# project = google_project.project.project_id +# region = var.region +# ssh_key = var.ssh_key +# zones = toset([var.zone]) +# name = "cluster22" +# network_id = google_compute_network.default.id +# subnetwork_id = google_compute_subnetwork.default.id +# service_range_name = google_compute_subnetwork.default.secondary_ip_range[0].range_name +# pod_range_name = google_compute_subnetwork.default.secondary_ip_range[1].range_name +# service_account_email = google_service_account.k8s_service_account.email +# dns_managed_zone = google_dns_managed_zone.zone +# ingress_type = var.ingress_type +# 
public_ingress                          = var.public_ingress
+#   external_dns_k8s_namespace              = local.external_dns_k8s_namespace
+#   external_dns_k8s_service_account        = local.external_dns_k8s_service_account
+#   external_dns_gcp_service_account_email  = google_service_account.external_dns.email
+#   cluster_exists                          = var.cluster_exists
+#   service_container                       = google_project_service.service["container"]
+# }
+
+# module "cluster23" {
+#   source                                  = "./modules/cluster"
+#   project                                 = google_project.project.project_id
+#   region                                  = var.region
+#   ssh_key                                 = var.ssh_key
+#   zones                                   = toset([var.zone])
+#   name                                    = "cluster23"
+#   network_id                              = google_compute_network.default.id
+#   subnetwork_id                           = google_compute_subnetwork.default.id
+#   service_range_name                      = google_compute_subnetwork.default.secondary_ip_range[0].range_name
+#   pod_range_name                          = google_compute_subnetwork.default.secondary_ip_range[1].range_name
+#   service_account_email                   = google_service_account.k8s_service_account.email
+#   dns_managed_zone                        = google_dns_managed_zone.zone
+#   ingress_type                            = var.ingress_type
+#   public_ingress                          = var.public_ingress
+#   external_dns_k8s_namespace              = local.external_dns_k8s_namespace
+#   external_dns_k8s_service_account        = local.external_dns_k8s_service_account
+#   external_dns_gcp_service_account_email  = google_service_account.external_dns.email
+#   cluster_exists                          = var.cluster_exists
+#   service_container                       = google_project_service.service["container"]
+# }

[... 40 more identical commented-out module blocks, "cluster24" through "cluster63", omitted; each differs only in its `name` argument ...]
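These commented-out blocks differ only in their `name` argument. Terraform 0.13+ supports `for_each` on modules, so the same fleet could be declared once; a minimal sketch, assuming every argument other than `name` stays constant (`cluster_count` is a hypothetical variable, not part of the repo):

```
# Sketch: declare N identical clusters in one block instead of N copies.
variable "cluster_count" {
  type    = number
  default = 14 # the number of clusters that fit in 10.10.10.0/26 (see README)
}

module "cluster" {
  source   = "./modules/cluster"
  for_each = toset([for i in range(1, var.cluster_count + 1) : "cluster${i}"])

  name                                    = each.key
  project                                 = google_project.project.project_id
  region                                  = var.region
  ssh_key                                 = var.ssh_key
  zones                                   = toset([var.zone])
  network_id                              = google_compute_network.default.id
  subnetwork_id                           = google_compute_subnetwork.default.id
  service_range_name                      = google_compute_subnetwork.default.secondary_ip_range[0].range_name
  pod_range_name                          = google_compute_subnetwork.default.secondary_ip_range[1].range_name
  service_account_email                   = google_service_account.k8s_service_account.email
  dns_managed_zone                        = google_dns_managed_zone.zone
  ingress_type                            = var.ingress_type
  public_ingress                          = var.public_ingress
  external_dns_k8s_namespace              = local.external_dns_k8s_namespace
  external_dns_k8s_service_account        = local.external_dns_k8s_service_account
  external_dns_gcp_service_account_email  = google_service_account.external_dns.email
  cluster_exists                          = var.cluster_exists
  service_container                       = google_project_service.service["container"]
}
```

Individual clusters would then be addressed as `module.cluster["cluster7"]` instead of `module.cluster7`.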
diff --git a/terraform/dns.tf b/terraform/dns.tf
new file mode 100644
index 0000000..380c523
--- /dev/null
+++ b/terraform/dns.tf
@@ -0,0 +1,20 @@
+# TODO: Switch to not requiring trailing period?
+
+variable "dns_root" {
+  description = "DNS domain root with trailing period. Example: \"foo.bar.com.\""
+  type        = string
+}
+
+
+resource "google_dns_managed_zone" "zone" {
+  project  = google_project.project.project_id
+  name     = "dns-zone"
+  dns_name = var.dns_root
+
+  depends_on = [google_project_service.service["dns"], ]
+}
+
+output "dns_name_servers" {
+  description = "Create NS records pointing your domain at these servers."
+  value       = google_dns_managed_zone.zone.name_servers
+}
diff --git a/terraform/external_dns.tf b/terraform/external_dns.tf
new file mode 100644
index 0000000..c7d8f4d
--- /dev/null
+++ b/terraform/external_dns.tf
@@ -0,0 +1,51 @@
+locals {
+  external_dns_k8s_namespace       = "external-dns"
+  external_dns_k8s_service_account = "external-dns"
+}
+
+resource "random_string" "identity_pool" {
+  length  = 6
+  upper   = false
+  special = false
+}
+
+resource "google_iam_workload_identity_pool" "identity_pool" {
+  project                   = google_project.project.project_id
+  workload_identity_pool_id = "identity-pool-${random_string.identity_pool.result}"
+  depends_on                = [google_project_service.service["iam"], ]
+}
+
+resource "google_service_account" "external_dns" {
+  project      = google_project.project.project_id
+  account_id   = "wi-${local.external_dns_k8s_namespace}-${local.external_dns_k8s_service_account}"
+  display_name = "Workload identity account for GKE [${local.external_dns_k8s_namespace}/${local.external_dns_k8s_service_account}]"
+}
+
+data "google_iam_policy" "policy" {
+  binding {
+    role = "roles/iam.workloadIdentityUser"
+
+    members = [
+      "serviceAccount:${google_project.project.project_id}.svc.id.goog[${local.external_dns_k8s_namespace}/${local.external_dns_k8s_service_account}]",
+    ]
+  }
+}
+
+resource "google_service_account_iam_policy" "policy_binding" {
+  service_account_id = google_service_account.external_dns.name
+  policy_data        = data.google_iam_policy.policy.policy_data
+  depends_on         = [google_iam_workload_identity_pool.identity_pool, ]
+}
+
+resource "google_project_iam_member" "external_dns" {
+  project = google_project.project.project_id
+  member  = "serviceAccount:${google_service_account.external_dns.email}"
+  role    = "roles/dns.reader"
+}
+
+resource "google_dns_managed_zone_iam_member" "member" {
+  project      = google_project.project.project_id
+  managed_zone = google_dns_managed_zone.zone.name
+  role         = "roles/dns.admin"
+  member       = "serviceAccount:${google_service_account.external_dns.email}"
+}
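The `roles/iam.workloadIdentityUser` binding above only takes effect once the in-cluster ServiceAccount carries the matching annotation pointing back at the Google service account (standard GKE Workload Identity wiring). That presumably happens inside the cluster module, which is not shown in this excerpt; a minimal sketch with the kubernetes provider would look like:

```
# Sketch: the Kubernetes ServiceAccount that ExternalDNS runs as must be
# annotated with the GCP service account's email for Workload Identity
# to map the two identities together.
resource "kubernetes_service_account_v1" "external_dns" {
  metadata {
    name      = local.external_dns_k8s_service_account
    namespace = local.external_dns_k8s_namespace
    annotations = {
      "iam.gke.io/gcp-service-account" = google_service_account.external_dns.email
    }
  }
}
```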
diff --git a/terraform/main.tf b/terraform/main.tf
new file mode 100644
index 0000000..2855a30
--- /dev/null
+++ b/terraform/main.tf
@@ -0,0 +1,117 @@
+terraform {
+  backend "gcs" {
+    bucket = "tf-state-4b00"
+    prefix = "terraform/deid_test" # TODO: fix this
+  }
+
+  required_providers {
+    google = {
+      source  = "hashicorp/google"
+      version = "6.21.0"
+    }
+    google-beta = {
+      source  = "hashicorp/google-beta"
+      version = "6.21.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "3.6.2"
+    }
+  }
+}
+
+variable "provider_project" {
+  description = "Project ID of the pre-existing project the terraform providers use for billing/quota (see the manual step below)."
+  type        = string
+  default     = "terraform-management-427323"
+}
+
+variable "region" {
+  description = "Region to deploy into."
+  type        = string
+  default     = "us-central1"
+}
+
+variable "zone" {
+  description = "Zone to deploy into."
+  type        = string
+  default     = "us-central1-f"
+}
+
+variable "public_ingress" {
+  description = "Set to true to make the kubernetes ingresses exposed to the public internet."
+  type        = bool
+  default     = false
+}
+
+variable "ingress_type" {
+  description = "What controller should we use to handle incoming http(s) connections."
+  type        = string
+  default     = "gateway"
+}
+
+variable "cluster_exists" {
+  description = "Set to true after the kubernetes clusters exist to install the kubernetes_manifest resources. See https://github.com/hashicorp/terraform-provider-kubernetes/issues/1775"
+  type        = bool
+}
+
+variable "quota_email" {
+  description = "Contact e-mail to put on quota increase requests."
+  type        = string
+  default     = null
+}
+
+variable "quota_justification" {
+  description = "The reason given to Google for why the quotas need to be increased."
+  type        = string
+  default     = null
+}
+
+variable "ssh_key" {
+  description = "SSH key to install on user machine and GKE nodes. Format: username:public key"
+  type        = string
+  default     = null
+}
+
+# manual step: enable cloudbilling.googleapis.com in the terraform provider project
+# https://console.developers.google.com/apis/api/cloudbilling.googleapis.com/overview?project=terraform-management-427323
+provider "google" {
+  project               = var.provider_project
+  region                = var.region
+  zone                  = var.zone
+  billing_project       = var.provider_project
+  user_project_override = true
+}
+
+provider "google-beta" {
+  project               = var.provider_project
+  region                = var.region
+  zone                  = var.zone
+  billing_project       = var.provider_project
+  user_project_override = true
+}
+
+# TODO: Switch to random_string
+resource "random_id" "project" {
+  byte_length = 4
+}
+
+data "google_billing_account" "acct" {
+  display_name = "My Billing Account"
+  open         = true
+}
+
+resource "google_project" "project" {
+  name            = "K8s IP Demo"
+  project_id      = "k8s-ip-demo-${random_id.project.hex}"
+  billing_account = data.google_billing_account.acct.id
+  deletion_policy = "DELETE"
+}
+
+resource "google_project_service" "service" {
+  # "recommender" is for enabling IP utilization metrics for GKE clusters;
+  # "dns" is required by the google_dns_managed_zone in dns.tf.
+  project                    = google_project.project.project_id
+  for_each                   = toset(["iam", "monitoring", "compute", "container", "logging", "recommender", "cloudquotas", "dns"])
+  service                    = "${each.key}.googleapis.com"
+  disable_dependent_services = true
+}
diff --git a/terraform/modules/cluster/kubeconfig.tf b/terraform/modules/cluster/kubeconfig.tf
new file mode 100644
index 0000000..6e0055d
--- /dev/null
+++ b/terraform/modules/cluster/kubeconfig.tf
@@ -0,0 +1,50 @@
+locals {
+  kubeconfig_name = "gke_${google_container_cluster.cluster.project}_${google_container_cluster.cluster.location}_${google_container_cluster.cluster.name}"
+  kubeconfig_yaml = yamlencode(local.kubeconfig)
+  kubeconfig = {
+    apiVersion  = "v1"
+    kind        = "Config"
+    preferences = {}
+    clusters = [
+      {
+        name = local.kubeconfig_name
+        cluster = {
+          server = "https://${google_container_cluster.cluster.control_plane_endpoints_config[0].dns_endpoint_config[0].endpoint}"
+        }
+      }
+    ]
+    contexts = [
+      {
+        name = local.kubeconfig_name
+        context = {
+          cluster = local.kubeconfig_name
+          user    = local.kubeconfig_name
+        }
+      }
+    ]
+    current-context = local.kubeconfig_name
+    users = [
+      {
+        name = local.kubeconfig_name
+        user = {
+          exec = {
+            apiVersion         = "client.authentication.k8s.io/v1beta1"
+            command            = "gke-gcloud-auth-plugin"
+            provideClusterInfo = true
+            installHint = < v } : {}
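kubeconfig.tf renders the kubeconfig YAML, but the part of the repo that writes it to disk falls inside the gap above. A plausible sketch, assuming the hashicorp/local provider and the git-ignored `output` directory from .gitignore:

```
# Sketch: write the rendered kubeconfig under the git-ignored output/ directory.
resource "local_file" "kubeconfig" {
  content         = local.kubeconfig_yaml
  filename        = "${path.root}/output/${google_container_cluster.cluster.name}.kubeconfig"
  file_permission = "0600"
}
```

kubectl can then be pointed at the file (for example `KUBECONFIG=output/cluster1.kubeconfig`), with authentication handled by the `gke-gcloud-auth-plugin` exec hook declared above.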
+
+  manifest = {
+    "apiVersion" = "gateway.networking.k8s.io/v1"
+    "kind"       = "HTTPRoute"
+    "metadata" = {
+      "name"      = "${var.cluster.name}-${each.value.metadata[0].name}"
+      "namespace" = var.main_k8s_namespace
+    }
+    "spec" = {
+      "parentRefs" = [
+        {
+          "name"      = kubernetes_manifest.gateway[0].manifest.metadata.name
+          "namespace" = kubernetes_manifest.gateway[0].manifest.metadata.namespace
+        }
+      ]
+      "hostnames" = [trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")]
+      "rules" = [
+        {
+          "backendRefs" = [
+            {
+              "name" = each.value.metadata[0].name
+              "port" = 80
+            },
+          ]
+          "matches" = [
+            {
+              "path" = {
+                "type"  = "PathPrefix"
+                "value" = "/"
+              }
+            },
+          ]
+        }
+      ]
+    }
+  }
+
+  depends_on = [time_sleep.wait_service_cleanup, var.cluster]
+}
diff --git a/terraform/modules/k8s_workload/ingress_gce.tf b/terraform/modules/k8s_workload/ingress_gce.tf
new file mode 100644
index 0000000..bb64247
--- /dev/null
+++ b/terraform/modules/k8s_workload/ingress_gce.tf
@@ -0,0 +1,31 @@
+resource "kubernetes_ingress_v1" "ingress_gce" {
+  for_each = var.ingress_type == "gce" ? { for k, v in kubernetes_service_v1.default : k => v } : {}
+
+  metadata {
+    name = "${var.cluster.name}-${each.value.metadata[0].name}"
+    annotations = {
+      "kubernetes.io/ingress.class" = var.public_ingress ? "gce" : "gce-internal"
+    }
+  }
+
+  spec {
+    rule {
+      host = trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")
+      http {
+        path {
+          path = "/"
+          backend {
+            service {
+              name = each.value.metadata[0].name
+              port {
+                number = 80
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  depends_on = [time_sleep.wait_service_cleanup]
+}
diff --git a/terraform/modules/k8s_workload/ingress_nginx.tf b/terraform/modules/k8s_workload/ingress_nginx.tf
new file mode 100644
index 0000000..984615a
--- /dev/null
+++ b/terraform/modules/k8s_workload/ingress_nginx.tf
@@ -0,0 +1,45 @@
+# apiVersion: networking.k8s.io/v1
+# kind: IngressClass
+# metadata:
+#   name: nginx-public
+#   annotations:
+#     ingressclass.kubernetes.io/is-default-class: "true"
+# spec:
+#   controller: k8s.io/ingress-nginx
+
+module "nginx_ingress_controller" {
+  count  = var.ingress_type == "nginx" ? 1 : 0
+  source = "../nginx_ingress_controller"
+}
+
+resource "kubernetes_ingress_v1" "ingress_nginx" {
+  for_each = var.ingress_type == "nginx" ? { for k, v in kubernetes_service_v1.default : k => v } : {}
+
+  metadata {
+    name = "${var.cluster.name}-${each.value.metadata[0].name}"
+    annotations = {
+      # The controller installed above registers ingress class "nginx"
+      # (--ingress-class=nginx); public vs. internal exposure is a property
+      # of the controller's Service, not of the Ingress class.
+      "kubernetes.io/ingress.class" = "nginx"
+    }
+  }
+
+  spec {
+    rule {
+      host = trimsuffix("${each.value.metadata[0].name}.${var.cluster.name}.${var.dns_managed_zone.dns_name}", ".")
+      http {
+        path {
+          path = "/"
+          backend {
+            service {
+              name = each.value.metadata[0].name
+              port {
+                number = 80
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  depends_on = [time_sleep.wait_service_cleanup]
+}
diff --git a/terraform/modules/k8s_workload/main.tf b/terraform/modules/k8s_workload/main.tf
new file mode 100644
index 0000000..fae9098
--- /dev/null
+++ b/terraform/modules/k8s_workload/main.tf
@@ -0,0 +1,143 @@
+variable "project" {
+  type = string
+}
+
+variable "region" {
+  type = string
+}
+
+variable "cluster" {
+}
+
+variable "node_pool" {
+}
+
+variable "dns_managed_zone" {
+}
+
+variable "public_ingress" {
+  description = "Set to true to make the kubernetes ingresses exposed to the public internet."
+  type        = bool
+}
+
+variable "ingress_type" {
+  description = "What controller should we use to handle incoming http(s) connections."
+ type = string +} + +variable "main_k8s_namespace" { + type = string +} + +# Provide time for Service cleanup +resource "time_sleep" "wait_service_cleanup" { + depends_on = [var.cluster] + + destroy_duration = "180s" +} + + +resource "kubernetes_deployment_v1" "default" { + count = 12 + metadata { + name = "deployment${count.index + 1}" + } + + spec { + replicas = 2 + + selector { + match_labels = { + app = "hello-app-${count.index + 1}" + } + } + + template { + metadata { + labels = { + app = "hello-app-${count.index + 1}" + } + } + + spec { + container { + image = "us-docker.pkg.dev/google-samples/containers/gke/hello-app:2.0" + name = "hello-app-container" + + port { + container_port = 8080 + name = "hello-app-svc" + } + + security_context { + allow_privilege_escalation = false + privileged = false + read_only_root_filesystem = false + + capabilities { + add = [] + drop = ["NET_RAW"] + } + } + + liveness_probe { + http_get { + path = "/" + port = "hello-app-svc" + } + + initial_delay_seconds = 3 + period_seconds = 3 + } + } + + security_context { + run_as_non_root = true + + seccomp_profile { + type = "RuntimeDefault" + } + } + + # Toleration is currently required to prevent perpetual diff: + # https://github.com/hashicorp/terraform-provider-kubernetes/pull/2380 + toleration { + effect = "NoSchedule" + key = "kubernetes.io/arch" + operator = "Equal" + value = "amd64" + } + } + } + } + + depends_on = [var.node_pool] +} + +resource "kubernetes_service_v1" "default" { + count = 12 + metadata { + name = "service${count.index + 1}" + annotations = { + # TODO: Revisit this, is this needed with the gateway API? + "networking.gke.io/load-balancer-type" = "Internal" # Remove to create an external loadbalancer + } + } + + spec { + selector = { + app = kubernetes_deployment_v1.default[count.index].spec[0].selector[0].match_labels.app + } + + ip_family_policy = "SingleStack" + + port { + port = 80 + target_port = kubernetes_deployment_v1.default[count.index].spec[0].template[0].spec[0].container[0].port[0].name + } + + type = "ClusterIP" + } + + depends_on = [var.node_pool, time_sleep.wait_service_cleanup] +} diff --git a/terraform/modules/nginx_ingress_controller/ingress-nginx-controller-v1.12.0.tf b/terraform/modules/nginx_ingress_controller/ingress-nginx-controller-v1.12.0.tf new file mode 100644 index 0000000..656be3e --- /dev/null +++ b/terraform/modules/nginx_ingress_controller/ingress-nginx-controller-v1.12.0.tf @@ -0,0 +1,1024 @@ +resource "kubernetes_manifest" "namespace_ingress_nginx" { + manifest = { + "apiVersion" = "v1" + "kind" = "Namespace" + "metadata" = { + "labels" = { + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + } + "name" = "ingress-nginx" + } + } +} + +resource "kubernetes_manifest" "serviceaccount_ingress_nginx_ingress_nginx" { + manifest = { + "apiVersion" = "v1" + "automountServiceAccountToken" = true + "kind" = "ServiceAccount" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + } +} + +resource "kubernetes_manifest" "serviceaccount_ingress_nginx_ingress_nginx_admission" { + manifest = { + "apiVersion" = "v1" + "automountServiceAccountToken" = true + "kind" = "ServiceAccount" + "metadata" = { + 
"labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + } +} + +resource "kubernetes_manifest" "role_ingress_nginx_ingress_nginx" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "Role" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "rules" = [ + { + "apiGroups" = [ + "", + ] + "resources" = [ + "namespaces", + ] + "verbs" = [ + "get", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "configmaps", + "pods", + "secrets", + "endpoints", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "services", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingresses", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingresses/status", + ] + "verbs" = [ + "update", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingressclasses", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "coordination.k8s.io", + ] + "resourceNames" = [ + "ingress-nginx-leader", + ] + "resources" = [ + "leases", + ] + "verbs" = [ + "get", + "update", + ] + }, + { + "apiGroups" = [ + "coordination.k8s.io", + ] + "resources" = [ + "leases", + ] + "verbs" = [ + "create", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "events", + ] + "verbs" = [ + "create", + "patch", + ] + }, + { + "apiGroups" = [ + "discovery.k8s.io", + ] + "resources" = [ + "endpointslices", + ] + "verbs" = [ + "list", + "watch", + "get", + ] + }, + ] + } +} + +resource "kubernetes_manifest" "role_ingress_nginx_ingress_nginx_admission" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "Role" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "rules" = [ + { + "apiGroups" = [ + "", + ] + "resources" = [ + "secrets", + ] + "verbs" = [ + "get", + "create", + ] + }, + ] + } +} + +resource "kubernetes_manifest" "clusterrole_ingress_nginx" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "ClusterRole" + "metadata" = { + "labels" = { + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx" + } + "rules" = [ + { + "apiGroups" = [ + "", + ] + "resources" = [ + "configmaps", + "endpoints", + "nodes", + "pods", + "secrets", + "namespaces", + ] + "verbs" = [ + 
"list", + "watch", + ] + }, + { + "apiGroups" = [ + "coordination.k8s.io", + ] + "resources" = [ + "leases", + ] + "verbs" = [ + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "nodes", + ] + "verbs" = [ + "get", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "services", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingresses", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "", + ] + "resources" = [ + "events", + ] + "verbs" = [ + "create", + "patch", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingresses/status", + ] + "verbs" = [ + "update", + ] + }, + { + "apiGroups" = [ + "networking.k8s.io", + ] + "resources" = [ + "ingressclasses", + ] + "verbs" = [ + "get", + "list", + "watch", + ] + }, + { + "apiGroups" = [ + "discovery.k8s.io", + ] + "resources" = [ + "endpointslices", + ] + "verbs" = [ + "list", + "watch", + "get", + ] + }, + ] + } +} + +resource "kubernetes_manifest" "clusterrole_ingress_nginx_admission" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "ClusterRole" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + } + "rules" = [ + { + "apiGroups" = [ + "admissionregistration.k8s.io", + ] + "resources" = [ + "validatingwebhookconfigurations", + ] + "verbs" = [ + "get", + "update", + ] + }, + ] + } +} + +resource "kubernetes_manifest" "rolebinding_ingress_nginx_ingress_nginx" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "RoleBinding" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "roleRef" = { + "apiGroup" = "rbac.authorization.k8s.io" + "kind" = "Role" + "name" = "ingress-nginx" + } + "subjects" = [ + { + "kind" = "ServiceAccount" + "name" = "ingress-nginx" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + }, + ] + } +} + +resource "kubernetes_manifest" "rolebinding_ingress_nginx_ingress_nginx_admission" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "RoleBinding" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "roleRef" = { + "apiGroup" = "rbac.authorization.k8s.io" + "kind" = "Role" + "name" = "ingress-nginx-admission" + } + "subjects" = [ + { + "kind" = "ServiceAccount" + "name" = "ingress-nginx-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + }, + ] + } +} + +resource "kubernetes_manifest" "clusterrolebinding_ingress_nginx" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" 
+ "kind" = "ClusterRoleBinding" + "metadata" = { + "labels" = { + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx" + } + "roleRef" = { + "apiGroup" = "rbac.authorization.k8s.io" + "kind" = "ClusterRole" + "name" = "ingress-nginx" + } + "subjects" = [ + { + "kind" = "ServiceAccount" + "name" = "ingress-nginx" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + }, + ] + } +} + +resource "kubernetes_manifest" "clusterrolebinding_ingress_nginx_admission" { + manifest = { + "apiVersion" = "rbac.authorization.k8s.io/v1" + "kind" = "ClusterRoleBinding" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + } + "roleRef" = { + "apiGroup" = "rbac.authorization.k8s.io" + "kind" = "ClusterRole" + "name" = "ingress-nginx-admission" + } + "subjects" = [ + { + "kind" = "ServiceAccount" + "name" = "ingress-nginx-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + }, + ] + } +} + +resource "kubernetes_manifest" "configmap_ingress_nginx_ingress_nginx_controller" { + manifest = { + "apiVersion" = "v1" + "data" = null + "kind" = "ConfigMap" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-controller" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + } +} + +resource "kubernetes_manifest" "service_ingress_nginx_ingress_nginx_controller" { + manifest = { + "apiVersion" = "v1" + "kind" = "Service" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-controller" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "spec" = { + "externalTrafficPolicy" = "Local" + "ipFamilies" = [ + "IPv4", + ] + "ipFamilyPolicy" = "SingleStack" + "ports" = [ + { + "appProtocol" = "http" + "name" = "http" + "port" = 80 + "protocol" = "TCP" + "targetPort" = "http" + }, + { + "appProtocol" = "https" + "name" = "https" + "port" = 443 + "protocol" = "TCP" + "targetPort" = "https" + }, + ] + "selector" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + } + "type" = "LoadBalancer" + } + } +} + +resource "kubernetes_manifest" "service_ingress_nginx_ingress_nginx_controller_admission" { + manifest = { + "apiVersion" = "v1" + "kind" = "Service" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-controller-admission" + "namespace" = 
+    }
+    "spec" = {
+      "ports" = [
+        {
+          "appProtocol" = "https"
+          "name" = "https-webhook"
+          "port" = 443
+          "targetPort" = "webhook"
+        },
+      ]
+      "selector" = {
+        "app.kubernetes.io/component" = "controller"
+        "app.kubernetes.io/instance" = "ingress-nginx"
+        "app.kubernetes.io/name" = "ingress-nginx"
+      }
+      "type" = "ClusterIP"
+    }
+  }
+}
+
+resource "kubernetes_manifest" "deployment_ingress_nginx_ingress_nginx_controller" {
+  manifest = {
+    "apiVersion" = "apps/v1"
+    "kind" = "Deployment"
+    "metadata" = {
+      "labels" = {
+        "app.kubernetes.io/component" = "controller"
+        "app.kubernetes.io/instance" = "ingress-nginx"
+        "app.kubernetes.io/name" = "ingress-nginx"
+        "app.kubernetes.io/part-of" = "ingress-nginx"
+        "app.kubernetes.io/version" = "1.12.0"
+      }
+      "name" = "ingress-nginx-controller"
+      "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name
+    }
+    "spec" = {
+      "minReadySeconds" = 0
+      "revisionHistoryLimit" = 10
+      "selector" = {
+        "matchLabels" = {
+          "app.kubernetes.io/component" = "controller"
+          "app.kubernetes.io/instance" = "ingress-nginx"
+          "app.kubernetes.io/name" = "ingress-nginx"
+        }
+      }
+      "strategy" = {
+        "rollingUpdate" = {
+          "maxUnavailable" = 1
+        }
+        "type" = "RollingUpdate"
+      }
+      "template" = {
+        "metadata" = {
+          "labels" = {
+            "app.kubernetes.io/component" = "controller"
+            "app.kubernetes.io/instance" = "ingress-nginx"
+            "app.kubernetes.io/name" = "ingress-nginx"
+            "app.kubernetes.io/part-of" = "ingress-nginx"
+            "app.kubernetes.io/version" = "1.12.0"
+          }
+        }
+        "spec" = {
+          "containers" = [
+            {
+              "args" = [
+                "/nginx-ingress-controller",
+                "--publish-service=$(POD_NAMESPACE)/ingress-nginx-controller",
+                "--election-id=ingress-nginx-leader",
+                "--controller-class=k8s.io/ingress-nginx",
+                "--ingress-class=nginx",
+                "--configmap=$(POD_NAMESPACE)/ingress-nginx-controller",
+                "--validating-webhook=:8443",
+                "--validating-webhook-certificate=/usr/local/certificates/cert",
+                "--validating-webhook-key=/usr/local/certificates/key",
+              ]
+              "env" = [
+                {
+                  "name" = "POD_NAME"
+                  "valueFrom" = {
+                    "fieldRef" = {
+                      "fieldPath" = "metadata.name"
+                    }
+                  }
+                },
+                {
+                  "name" = "POD_NAMESPACE"
+                  "valueFrom" = {
+                    "fieldRef" = {
+                      "fieldPath" = "metadata.namespace"
+                    }
+                  }
+                },
+                {
+                  "name" = "LD_PRELOAD"
+                  "value" = "/usr/local/lib/libmimalloc.so"
+                },
+              ]
+              "image" = "registry.k8s.io/ingress-nginx/controller:v1.12.0@sha256:e6b8de175acda6ca913891f0f727bca4527e797d52688cbe9fec9040d6f6b6fa"
+              "imagePullPolicy" = "IfNotPresent"
+              "lifecycle" = {
+                "preStop" = {
+                  "exec" = {
+                    "command" = [
+                      "/wait-shutdown",
+                    ]
+                  }
+                }
+              }
+              "livenessProbe" = {
+                "failureThreshold" = 5
+                "httpGet" = {
+                  "path" = "/healthz"
+                  "port" = 10254
+                  "scheme" = "HTTP"
+                }
+                "initialDelaySeconds" = 10
+                "periodSeconds" = 10
+                "successThreshold" = 1
+                "timeoutSeconds" = 1
+              }
+              "name" = "controller"
+              "ports" = [
+                {
+                  "containerPort" = 80
+                  "name" = "http"
+                  "protocol" = "TCP"
+                },
+                {
+                  "containerPort" = 443
+                  "name" = "https"
+                  "protocol" = "TCP"
+                },
+                {
+                  "containerPort" = 8443
+                  "name" = "webhook"
+                  "protocol" = "TCP"
+                },
+              ]
+              "readinessProbe" = {
+                "failureThreshold" = 3
+                "httpGet" = {
+                  "path" = "/healthz"
+                  "port" = 10254
+                  "scheme" = "HTTP"
+                }
+                "initialDelaySeconds" = 10
+                "periodSeconds" = 10
+                "successThreshold" = 1
+                "timeoutSeconds" = 1
+              }
+              "resources" = {
+                "requests" = {
+                  "cpu" = "100m"
+                  "memory" = "90Mi"
+                }
+              }
+              "securityContext" = {
+                "allowPrivilegeEscalation" = false
+                "capabilities" = {
+                  "add" = [
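+                    # NET_BIND_SERVICE lets the controller bind ports 80 and 443 while running as the non-root nginx user (runAsUser 101).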
"NET_BIND_SERVICE", + ] + "drop" = [ + "ALL", + ] + } + "readOnlyRootFilesystem" = false + "runAsGroup" = 82 + "runAsNonRoot" = true + "runAsUser" = 101 + "seccompProfile" = { + "type" = "RuntimeDefault" + } + } + "volumeMounts" = [ + { + "mountPath" = "/usr/local/certificates/" + "name" = "webhook-cert" + "readOnly" = true + }, + ] + }, + ] + "dnsPolicy" = "ClusterFirst" + "nodeSelector" = { + "kubernetes.io/os" = "linux" + } + "serviceAccountName" = "ingress-nginx" + "terminationGracePeriodSeconds" = 300 + "volumes" = [ + { + "name" = "webhook-cert" + "secret" = { + "secretName" = "ingress-nginx-admission" + } + }, + ] + } + } + } + } +} + +resource "kubernetes_manifest" "job_ingress_nginx_ingress_nginx_admission_create" { + manifest = { + "apiVersion" = "batch/v1" + "kind" = "Job" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission-create" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "spec" = { + "template" = { + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission-create" + } + "spec" = { + "containers" = [ + { + "args" = [ + "create", + "--host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc", + "--namespace=$(POD_NAMESPACE)", + "--secret-name=ingress-nginx-admission", + ] + "env" = [ + { + "name" = "POD_NAMESPACE" + "valueFrom" = { + "fieldRef" = { + "fieldPath" = "metadata.namespace" + } + } + }, + ] + "image" = "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4" + "imagePullPolicy" = "IfNotPresent" + "name" = "create" + "securityContext" = { + "allowPrivilegeEscalation" = false + "capabilities" = { + "drop" = [ + "ALL", + ] + } + "readOnlyRootFilesystem" = true + "runAsGroup" = 65532 + "runAsNonRoot" = true + "runAsUser" = 65532 + "seccompProfile" = { + "type" = "RuntimeDefault" + } + } + }, + ] + "nodeSelector" = { + "kubernetes.io/os" = "linux" + } + "restartPolicy" = "OnFailure" + "serviceAccountName" = "ingress-nginx-admission" + } + } + } + } +} + +resource "kubernetes_manifest" "job_ingress_nginx_ingress_nginx_admission_patch" { + manifest = { + "apiVersion" = "batch/v1" + "kind" = "Job" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission-patch" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + } + "spec" = { + "template" = { + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission-patch" + } + "spec" = { + "containers" = [ + { + "args" = [ + "patch", + 
"--webhook-name=ingress-nginx-admission", + "--namespace=$(POD_NAMESPACE)", + "--patch-mutating=false", + "--secret-name=ingress-nginx-admission", + "--patch-failure-policy=Fail", + ] + "env" = [ + { + "name" = "POD_NAMESPACE" + "valueFrom" = { + "fieldRef" = { + "fieldPath" = "metadata.namespace" + } + } + }, + ] + "image" = "registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.5.0@sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4" + "imagePullPolicy" = "IfNotPresent" + "name" = "patch" + "securityContext" = { + "allowPrivilegeEscalation" = false + "capabilities" = { + "drop" = [ + "ALL", + ] + } + "readOnlyRootFilesystem" = true + "runAsGroup" = 65532 + "runAsNonRoot" = true + "runAsUser" = 65532 + "seccompProfile" = { + "type" = "RuntimeDefault" + } + } + }, + ] + "nodeSelector" = { + "kubernetes.io/os" = "linux" + } + "restartPolicy" = "OnFailure" + "serviceAccountName" = "ingress-nginx-admission" + } + } + } + } +} + +resource "kubernetes_manifest" "ingressclass_nginx" { + manifest = { + "apiVersion" = "networking.k8s.io/v1" + "kind" = "IngressClass" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "controller" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "nginx" + } + "spec" = { + "controller" = "k8s.io/ingress-nginx" + } + } +} + +resource "kubernetes_manifest" "validatingwebhookconfiguration_ingress_nginx_admission" { + manifest = { + "apiVersion" = "admissionregistration.k8s.io/v1" + "kind" = "ValidatingWebhookConfiguration" + "metadata" = { + "labels" = { + "app.kubernetes.io/component" = "admission-webhook" + "app.kubernetes.io/instance" = "ingress-nginx" + "app.kubernetes.io/name" = "ingress-nginx" + "app.kubernetes.io/part-of" = "ingress-nginx" + "app.kubernetes.io/version" = "1.12.0" + } + "name" = "ingress-nginx-admission" + } + "webhooks" = [ + { + "admissionReviewVersions" = [ + "v1", + ] + "clientConfig" = { + "service" = { + "name" = "ingress-nginx-controller-admission" + "namespace" = kubernetes_manifest.namespace_ingress_nginx.manifest.metadata.name + "path" = "/networking/v1/ingresses" + "port" = 443 + } + } + "failurePolicy" = "Fail" + "matchPolicy" = "Equivalent" + "name" = "validate.nginx.ingress.kubernetes.io" + "rules" = [ + { + "apiGroups" = [ + "networking.k8s.io", + ] + "apiVersions" = [ + "v1", + ] + "operations" = [ + "CREATE", + "UPDATE", + ] + "resources" = [ + "ingresses", + ] + }, + ] + "sideEffects" = "None" + }, + ] + } +} diff --git a/terraform/modules/nginx_ingress_controller/main.tf b/terraform/modules/nginx_ingress_controller/main.tf new file mode 100644 index 0000000..ef57cc4 --- /dev/null +++ b/terraform/modules/nginx_ingress_controller/main.tf @@ -0,0 +1,27 @@ +terraform { + required_providers { + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.36.0" + } + } +} + +data "google_client_config" "default" {} + +resource "kubernetes_cluster_role_binding" "cluster_admin_binding" { + metadata { + name = "cluster-admin-binding" + } + + subject { + kind = "User" + name = data.google_client_config.default.id + } + + role_ref { + api_group = "rbac.authorization.k8s.io" + kind = "ClusterRole" + name = "cluster-admin" + } +} diff --git a/terraform/public_nat.tf b/terraform/public_nat.tf new file mode 100644 index 0000000..a5123e2 --- /dev/null +++ b/terraform/public_nat.tf @@ -0,0 +1,22 @@ +# This enables NAT to external IP addresses so our GKE nodes 
+
+resource "google_compute_router" "router" {
+  project = google_project.project.project_id
+  name    = "snat-router"
+  network = google_compute_network.default.id
+  region  = google_compute_subnetwork.default.region
+}
+
+resource "google_compute_router_nat" "nat" {
+  project                            = google_project.project.project_id
+  name                               = "my-router-nat"
+  router                             = google_compute_router.router.name
+  region                             = google_compute_router.router.region
+  nat_ip_allocate_option             = "AUTO_ONLY"
+  source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
+
+  log_config {
+    enable = true
+    filter = "ERRORS_ONLY"
+  }
+}
diff --git a/terraform/quota.tf b/terraform/quota.tf
new file mode 100644
index 0000000..2d82d8a
--- /dev/null
+++ b/terraform/quota.tf
@@ -0,0 +1,76 @@
+# TODO: Make public IP quota dependent on var.public_ingress and update the amount to match what is expected to be spun up.
+
+# resource "google_cloud_quotas_quota_preference" "clusters_per_region" {
+#   count = var.quota_email == null ? 0 : 1
+#   parent = "projects/${google_project.project.project_id}"
+#   name = "container-clusters_per_region"
+#   dimensions = { region = var.region }
+#   service = "container.googleapis.com"
+#   quota_id = "ClustersPerRegion"
+#   contact_email = var.quota_email
+#   quota_config {
+#     preferred_value = 70
+#   }
+#   justification = var.quota_justification
+#   depends_on = [google_project_service.service["cloudquotas"], ]
+# }
+
+# resource "google_cloud_quotas_quota_preference" "public_ip_per_project_region" {
+#   count = var.quota_email == null ? 0 : 1
+#   parent = "projects/${google_project.project.project_id}"
+#   name = "compute-IN-USE-ADDRESSES-per-project-region"
+#   dimensions = { region = var.region }
+#   service = "compute.googleapis.com"
+#   quota_id = "IN-USE-ADDRESSES-per-project-region"
+#   contact_email = var.quota_email
+#   quota_config {
+#     preferred_value = 70
+#   }
+#   justification = var.quota_justification
+#   depends_on = [google_project_service.service["cloudquotas"], ]
+# }
+
+# resource "google_cloud_quotas_quota_preference" "compute_vm_instances" {
+#   count = var.quota_email == null ? 0 : 1
+#   parent = "projects/${google_project.project.project_id}"
+#   name = "compute-INSTANCES-per-project-region"
+#   dimensions = { region = var.region }
+#   service = "compute.googleapis.com"
+#   quota_id = "INSTANCES-per-project-region"
+#   contact_email = var.quota_email
+#   quota_config {
+#     preferred_value = 150
+#   }
+#   justification = var.quota_justification
+#   depends_on = [google_project_service.service["cloudquotas"], ]
+# }
+
+# resource "google_cloud_quotas_quota_preference" "compute_cpus" {
+#   count = var.quota_email == null ? 0 : 1
+#   parent = "projects/${google_project.project.project_id}"
+#   name = "compute-CPUS-per-project-region"
+#   dimensions = { region = var.region }
+#   service = "compute.googleapis.com"
+#   quota_id = "CPUS-per-project-region"
+#   contact_email = var.quota_email
+#   quota_config {
+#     preferred_value = 150
+#   }
+#   justification = var.quota_justification
+#   depends_on = [google_project_service.service["cloudquotas"], ]
+# }
+
+# resource "google_cloud_quotas_quota_preference" "compute_cpus_all_regions" {
+#   count = var.quota_email == null ? 0 : 1
+#   parent = "projects/${google_project.project.project_id}"
+#   name = "compute-CPUS-ALL-REGIONS-per-project"
+#   dimensions = {}
+#   service = "compute.googleapis.com"
+#   quota_id = "CPUS-ALL-REGIONS-per-project"
+#   contact_email = var.quota_email
+#   quota_config {
+#     preferred_value = 150
+#   }
+#   justification = var.quota_justification
+#   depends_on = [google_project_service.service["cloudquotas"], ]
+# }
diff --git a/terraform/user_machine.tf b/terraform/user_machine.tf
new file mode 100644
index 0000000..c6b3a26
--- /dev/null
+++ b/terraform/user_machine.tf
@@ -0,0 +1,64 @@
+#
+# This machine exists for the end user to SSH into so they can reach
+# internal-only ingresses.
+#
+
+output "user_machine_ssh_command" {
+  description = "Command to launch an SSH shell on the user machine."
+  value       = "gcloud compute ssh --zone '${var.zone}' '${google_compute_instance.user_machine.name}' --project '${google_project.project.project_id}'"
+}
+
+resource "google_service_account" "user_machine" {
+  project      = google_project.project.project_id
+  account_id   = "user-machine"
+  display_name = "Custom SA for User Machine VM Instance"
+}
+
+resource "google_compute_instance" "user_machine" {
+  project      = google_project.project.project_id
+  name         = "user-machine"
+  machine_type = "g1-small"
+  zone         = var.zone
+  tags         = ["allow-iap-ssh"]
+
+  boot_disk {
+    initialize_params {
+      image = "debian-cloud/debian-12"
+    }
+  }
+
+  network_interface {
+    network    = google_compute_network.default.id
+    subnetwork = google_compute_subnetwork.default.id
+  }
+
+  service_account {
+    email  = google_service_account.user_machine.email
+    scopes = ["cloud-platform"]
+  }
+
+  metadata = var.ssh_key != null ? {
+    ssh-keys = var.ssh_key
+  } : {}
+
+  depends_on = [google_project_service.service["compute"], google_project_service.service["logging"]]
+}
+
+resource "google_compute_firewall" "allow_iap_ssh" {
+  project   = google_project.project.project_id
+  name      = "allow-iap-ssh"
+  network   = google_compute_network.default.id
+  direction = "INGRESS"
+  allow {
+    protocol = "tcp"
+    ports    = ["22"]
+  }
+  source_ranges = ["35.235.240.0/20"]
+  target_tags   = ["allow-iap-ssh"]
+}
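+
+# Example (hostname is illustrative; ExternalDNS creates one record per
+# HTTPRoute under the dns_root you supplied): after connecting with the
+# user_machine_ssh_command output above, an internal ingress can be tested
+# with:
+#   curl http://demo1.k8sdemo.mydomain.example/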