# Snapshot of a sample EKS Terraform configuration (Feb 19, 2020).
# AWS provider configuration: credentials come from the local "default"
# shared-credentials profile; all resources are created in us-east-1.
provider "aws" {
  region  = "us-east-1"
  profile = "default"
}
5
variable "cluster-name" {
  description = "Name of the EKS cluster; also embedded in the kubernetes.io/cluster/* resource tags."
  type        = string
  default     = "terraform-eks-k8s"
}
10
# This data source is included for ease of sample architecture deployment
# and can be swapped out as necessary.
data "aws_availability_zones" "available" {
  # Restrict to AZs that are currently usable; without this filter the
  # data source may also return impaired or unavailable zones, which
  # would make the subnet creation below fail.
  state = "available"
}
15
# VPC hosting the EKS cluster and its worker nodes.
resource "aws_vpc" "k8s_cluster_vpc" {
  cidr_block = "10.0.0.0/16"

  # EKS worker nodes require DNS resolution and DNS hostnames to be
  # enabled on the VPC in order to register with the cluster endpoint.
  # enable_dns_hostnames defaults to false, so it must be set explicitly.
  enable_dns_support   = true
  enable_dns_hostnames = true

  # The shared kubernetes.io tag lets the cluster discover this VPC.
  tags = {
    "Name"                                      = "terraform-eks-k8s-node"
    "kubernetes.io/cluster/${var.cluster-name}" = "shared"
  }
}
24
# Two /24 subnets, one per availability zone, for the worker nodes.
resource "aws_subnet" "k8sNode" {
  count = 2

  vpc_id            = aws_vpc.k8s_cluster_vpc.id
  cidr_block        = "10.0.${count.index}.0/24"
  availability_zone = data.aws_availability_zones.available.names[count.index]

  # The shared kubernetes.io tag lets the cluster discover these subnets.
  tags = {
    "Name"                                      = "terraform-eks-k8s-node"
    "kubernetes.io/cluster/${var.cluster-name}" = "shared"
  }
}
37
# Internet gateway so nodes in the VPC can reach (and be reached from)
# the public internet.
resource "aws_internet_gateway" "k8s_cluster_vpc" {
  vpc_id = aws_vpc.k8s_cluster_vpc.id

  tags = {
    Name = "terraform-eks-k8s"
  }
}
45
# Route table sending all non-local traffic out through the internet
# gateway (i.e. these are public subnets).
resource "aws_route_table" "k8s_route" {
  vpc_id = aws_vpc.k8s_cluster_vpc.id

  route {
    gateway_id = aws_internet_gateway.k8s_cluster_vpc.id
    cidr_block = "0.0.0.0/0"
  }
}
54
# Attach the public route table to each of the two node subnets.
resource "aws_route_table_association" "k8s_route" {
  count = 2

  route_table_id = aws_route_table.k8s_route.id
  subnet_id      = aws_subnet.k8sNode[count.index].id
}
61
62
63#IAM config
64
# IAM role whose trust policy allows the EKS service (eks.amazonaws.com)
# to assume it; it is attached to the EKS cluster's control plane below.
# NOTE(review): despite the "k8s-node" local name, this is a *cluster*
# role. It is also referenced as the node rolearn in the aws-auth config
# map at the bottom of this file — confirm whether a separate
# EC2-trusted node role is needed for worker instances.
resource "aws_iam_role" "k8s-node" {
  name = "terraform-eks-k8s-cluster"

  assume_role_policy = <<POLICY
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "eks.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}
POLICY
}
83
# Grants the cluster role the AWS-managed permissions the EKS control
# plane needs to manage cluster resources.
resource "aws_iam_role_policy_attachment" "k8s-cluster-AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = aws_iam_role.k8s-node.name
}
88
# Grants the cluster role the AWS-managed service-linked permissions for
# EKS (required on clusters created with older platform versions).
resource "aws_iam_role_policy_attachment" "k8s-cluster-AmazonEKSServicePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
  role       = aws_iam_role.k8s-node.name
}
# Security group attached to the EKS control plane ENIs; governs traffic
# between the control plane and the worker nodes.
resource "aws_security_group" "k8s-cluster" {
  name        = "terraform-eks-k8s-cluster"
  description = "Cluster communication with worker nodes"
  vpc_id      = aws_vpc.k8s_cluster_vpc.id

  # Allow all outbound traffic from the control plane.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = {
    Name = "terraform-eks-k8s"
  }
}
109
110# OPTIONAL: Allow inbound traffic from your local workstation external IP
111# to the Kubernetes. You will need to replace A.B.C.D below with
112# your real IP. Services like icanhazip.com can help you find this.
# Opens the cluster API server (443/tcp) to a single workstation IP.
# NOTE(review): the /32 address is hardcoded; consider promoting it to a
# variable so each operator can supply their own IP instead of editing
# this resource.
resource "aws_security_group_rule" "k8s-cluster-ingress-workstation-https" {
  cidr_blocks       = ["173.38.220.53/32"]
  description       = "Allow workstation to communicate with the cluster API Server"
  from_port         = 443
  protocol          = "tcp"
  security_group_id = aws_security_group.k8s-cluster.id
  to_port           = 443
  type              = "ingress"
}
# The EKS control plane itself.
resource "aws_eks_cluster" "k8s" {
  name     = var.cluster-name
  role_arn = aws_iam_role.k8s-node.arn

  vpc_config {
    # First-class expression syntax; the quoted "${...}" interpolation
    # form is deprecated since Terraform 0.12 (the rest of this file
    # already uses 0.12 syntax, e.g. the [*] splat below).
    security_group_ids = [aws_security_group.k8s-cluster.id]
    subnet_ids         = aws_subnet.k8sNode[*].id
  }

  # The managed policies must be attached before cluster creation so the
  # control plane role has the permissions it needs from the start.
  depends_on = [
    aws_iam_role_policy_attachment.k8s-cluster-AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.k8s-cluster-AmazonEKSServicePolicy,
  ]
}
136#resource "aws_iam_role" "k8s-cluster" {
137# name = "terraform-eks-k8s-cluster"
138#
139# assume_role_policy = <<POLICY
140#{
141# "Version": "2012-10-17",
142# "Statement": [
143# {
144# "Effect": "Allow",
145# "Principal": {
146# "Service": "eks.amazonaws.com"
147# },
148# "Action": "sts:AssumeRole"
149# }
150# ]
151#}
152#POLICY
153#
154#}
155
# Security group shared by every worker node in the cluster.
resource "aws_security_group" "k8s-node" {
  name        = "terraform-eks-k8s-node"
  description = "Security group for all nodes in the cluster"
  vpc_id      = aws_vpc.k8s_cluster_vpc.id

  # Allow all outbound traffic from the nodes.
  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }

  # "owned" (rather than "shared") because this group exists solely for
  # this cluster's nodes.
  tags = {
    "Name"                                      = "terraform-eks-k8s-node"
    "kubernetes.io/cluster/${var.cluster-name}" = "owned"
  }
}
173
# Node-to-node traffic: nodes may talk to each other on any port/protocol.
resource "aws_security_group_rule" "k8s-node-ingress-self" {
  description              = "Allow node to communicate with each other"
  from_port                = 0
  protocol                 = "-1"
  security_group_id        = aws_security_group.k8s-node.id
  source_security_group_id = aws_security_group.k8s-node.id
  # With protocol "-1" AWS ignores port ranges and the provider expects
  # both ports to be 0; 65535 here causes a perpetual plan diff.
  to_port                  = 0
  type                     = "ingress"
}
183
# Control-plane-to-node traffic: kubelets and pods accept TCP on the
# ephemeral port range from the cluster security group.
resource "aws_security_group_rule" "k8s-node-ingress-cluster" {
  type                     = "ingress"
  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane"
  protocol                 = "tcp"
  from_port                = 1025
  to_port                  = 65535
  security_group_id        = aws_security_group.k8s-node.id
  source_security_group_id = aws_security_group.k8s-cluster.id
}
# Most recent Amazon-published EKS-optimized worker AMI whose name
# matches the cluster's Kubernetes version.
data "aws_ami" "eks-worker" {
  most_recent = true
  owners      = ["602401143452"] # Amazon EKS AMI Account ID

  filter {
    name   = "name"
    values = ["amazon-eks-node-${aws_eks_cluster.k8s.version}-v*"]
  }
}
# Current AWS region (as configured on the provider).
# NOTE(review): not referenced anywhere in this file — presumably used
# elsewhere or left over; verify before removing.
data "aws_region" "current" {
}
204
locals {
  # cloud-init user data for worker nodes: runs the EKS bootstrap script
  # so each instance registers itself with the cluster API server.
  # Fixed: the original had a stray space in the traversal
  # ("aws_eks_cluster. k8s"), which is invalid HCL.
  k8s-node-userdata = <<USERDATA
#!/bin/bash
set -o xtrace
/etc/eks/bootstrap.sh --apiserver-endpoint '${aws_eks_cluster.k8s.endpoint}' --b64-cluster-ca '${aws_eks_cluster.k8s.certificate_authority[0].data}' '${var.cluster-name}'
USERDATA

}
213
# Launch configuration for the worker node auto-scaling group.
# NOTE(review): the instance profile line is commented out and the
# referenced aws_iam_instance_profile.k8s-node resource does not exist
# in this file — without an instance profile (and an EC2-trusted node
# role) the workers cannot authenticate to the cluster; confirm this is
# defined elsewhere.
resource "aws_launch_configuration" "k8s" {
  associate_public_ip_address = true
# iam_instance_profile = aws_iam_instance_profile.k8s-node.name
  image_id = data.aws_ami.eks-worker.id
  instance_type = "m4.large"
  name_prefix = "terraform-eks-k8s"
  security_groups = [aws_security_group.k8s-node.id]
  user_data_base64 = base64encode(local.k8s-node-userdata)

  # Replace-then-destroy so the ASG always has a valid configuration to
  # reference during updates.
  lifecycle {
    create_before_destroy = true
  }
}
# Auto-scaling group of worker nodes (1-2 instances, desired 2), spread
# across both node subnets.
resource "aws_autoscaling_group" "k8s" {
  name                 = "terraform-eks-k8s"
  launch_configuration = aws_launch_configuration.k8s.id
  min_size             = 1
  max_size             = 2
  desired_capacity     = 2
  vpc_zone_identifier  = aws_subnet.k8sNode[*].id

  tag {
    key                 = "Name"
    value               = "terraform-eks-k8s"
    propagate_at_launch = true
  }

  # The "owned" cluster tag on each instance is how EKS/Kubernetes
  # identifies the nodes as belonging to this cluster.
  tag {
    key                 = "kubernetes.io/cluster/${var.cluster-name}"
    value               = "owned"
    propagate_at_launch = true
  }
}
locals {
  # aws-auth ConfigMap manifest that must be applied to the cluster so
  # nodes assuming the mapped role can join. Emitted via the output below
  # for the operator to `kubectl apply`.
  # NOTE(review): rolearn points at aws_iam_role.k8s-node, whose trust
  # policy only allows eks.amazonaws.com — worker EC2 instances normally
  # need an EC2-trusted role here; confirm the intended node role.
  config_map_aws_auth = <<CONFIGMAPAWSAUTH


apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ${aws_iam_role.k8s-node.arn}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
CONFIGMAPAWSAUTH

}
266
output "config_map_aws_auth" {
  description = "aws-auth ConfigMap manifest to apply (kubectl apply -f -) so worker nodes can join the cluster."
  value       = local.config_map_aws_auth
}