Terraform plan output · Mar 24, 2020, 12:06 PM
1Refreshing Terraform state in-memory prior to plan...
2The refreshed state will be used to calculate this plan, but will not be
3persisted to local or remote state storage.
4
5module.p002-sb2-k8s-nlam0.data.template_file.metallb_config: Refreshing state...
6module.p002-sb2-k8s-nlam0.data.template_file.kubespray_all: Refreshing state...
7module.p002-sb2-k8s-nlam0.data.template_file.kubespray_k8s_cluster: Refreshing state...
8module.p002-sb2-k8s-nlam0.data.vsphere_network.network: Refreshing state...
9module.p002-sb2-k8s-nlam0.data.vsphere_datacenter.dc: Refreshing state...
10module.p002-sb2-k8s-nlam0.data.vsphere_folder.vm_folder: Refreshing state...
11module.p002-sb2-k8s-nlam0.data.vsphere_resource_pool.pool["cluster_a"]: Refreshing state...
12module.p002-sb2-k8s-nlam0.data.vsphere_resource_pool.pool["cluster_c"]: Refreshing state...
13module.p002-sb2-k8s-nlam0.data.vsphere_virtual_machine.nix_template: Refreshing state...
14module.p002-sb2-k8s-nlam0.data.vsphere_resource_pool.pool["cluster_b"]: Refreshing state...
15module.p002-sb2-k8s-nlam0.data.vsphere_datastore.datastore["cluster_b_vsan"]: Refreshing state...
16module.p002-sb2-k8s-nlam0.data.vsphere_compute_cluster.cluster["cluster_a"]: Refreshing state...
17module.p002-sb2-k8s-nlam0.data.vsphere_compute_cluster.cluster["cluster_c"]: Refreshing state...
18module.p002-sb2-k8s-nlam0.data.vsphere_compute_cluster.cluster["cluster_b"]: Refreshing state...
19module.p002-sb2-k8s-nlam0.data.vsphere_datastore.datastore["cluster_a_vsan"]: Refreshing state...
20module.p002-sb2-k8s-nlam0.data.vsphere_datastore.datastore["cluster_c_vsan"]: Refreshing state...
21
22------------------------------------------------------------------------
23
24An execution plan has been generated and is shown below.
25Resource actions are indicated with the following symbols:
26 + create
27 <= read (data resources)
28
29Terraform will perform the following actions:
30
31 # null_resource.p002_namespace will be created
32 + resource "null_resource" "p002_namespace" {
33 + id = (known after apply)
34 }
35
36 # null_resource.p002_quota_limits will be created
37 + resource "null_resource" "p002_quota_limits" {
38 + id = (known after apply)
39 }
40
41 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master[0] will be read during apply
42 # (config refers to values not yet known)
43 <= data "template_file" "kubespray_hosts_master" {
44 + id = (known after apply)
45 + rendered = (known after apply)
46 + template = <<~EOT
47 ${hostname} ansible_host=${host_ip}
48 EOT
49 + vars = {
50 + "host_ip" = (known after apply)
51 + "hostname" = "p002xknm000.nix.tech.altenar.net"
52 }
53 }
54
55 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master[1] will be read during apply
56 # (config refers to values not yet known)
57 <= data "template_file" "kubespray_hosts_master" {
58 + id = (known after apply)
59 + rendered = (known after apply)
60 + template = <<~EOT
61 ${hostname} ansible_host=${host_ip}
62 EOT
63 + vars = {
64 + "host_ip" = (known after apply)
65 + "hostname" = "p002xknm100.nix.tech.altenar.net"
66 }
67 }
68
69 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master[2] will be read during apply
70 # (config refers to values not yet known)
71 <= data "template_file" "kubespray_hosts_master" {
72 + id = (known after apply)
73 + rendered = (known after apply)
74 + template = <<~EOT
75 ${hostname} ansible_host=${host_ip}
76 EOT
77 + vars = {
78 + "host_ip" = (known after apply)
79 + "hostname" = "p002xknm200.nix.tech.altenar.net"
80 }
81 }
82
83 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master_list[0] will be read during apply
84 # (config refers to values not yet known)
85 <= data "template_file" "kubespray_hosts_master_list" {
86 + id = (known after apply)
87 + rendered = (known after apply)
88 + template = <<~EOT
89 ${hostname}
90 EOT
91 + vars = {
92 + "hostname" = "p002xknm000.nix.tech.altenar.net"
93 }
94 }
95
96 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master_list[1] will be read during apply
97 # (config refers to values not yet known)
98 <= data "template_file" "kubespray_hosts_master_list" {
99 + id = (known after apply)
100 + rendered = (known after apply)
101 + template = <<~EOT
102 ${hostname}
103 EOT
104 + vars = {
105 + "hostname" = "p002xknm100.nix.tech.altenar.net"
106 }
107 }
108
109 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_master_list[2] will be read during apply
110 # (config refers to values not yet known)
111 <= data "template_file" "kubespray_hosts_master_list" {
112 + id = (known after apply)
113 + rendered = (known after apply)
114 + template = <<~EOT
115 ${hostname}
116 EOT
117 + vars = {
118 + "hostname" = "p002xknm200.nix.tech.altenar.net"
119 }
120 }
121
122 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[0] will be read during apply
123 # (config refers to values not yet known)
124 <= data "template_file" "kubespray_hosts_worker" {
125 + id = (known after apply)
126 + rendered = (known after apply)
127 + template = <<~EOT
128 ${hostname} ansible_host=${host_ip}
129 EOT
130 + vars = {
131 + "host_ip" = (known after apply)
132 + "hostname" = "p002xknw000.nix.tech.altenar.net"
133 }
134 }
135
136 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[1] will be read during apply
137 # (config refers to values not yet known)
138 <= data "template_file" "kubespray_hosts_worker" {
139 + id = (known after apply)
140 + rendered = (known after apply)
141 + template = <<~EOT
142 ${hostname} ansible_host=${host_ip}
143 EOT
144 + vars = {
145 + "host_ip" = (known after apply)
146 + "hostname" = "p002xknw001.nix.tech.altenar.net"
147 }
148 }
149
150 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[2] will be read during apply
151 # (config refers to values not yet known)
152 <= data "template_file" "kubespray_hosts_worker" {
153 + id = (known after apply)
154 + rendered = (known after apply)
155 + template = <<~EOT
156 ${hostname} ansible_host=${host_ip}
157 EOT
158 + vars = {
159 + "host_ip" = (known after apply)
160 + "hostname" = "p002xknw100.nix.tech.altenar.net"
161 }
162 }
163
164 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[3] will be read during apply
165 # (config refers to values not yet known)
166 <= data "template_file" "kubespray_hosts_worker" {
167 + id = (known after apply)
168 + rendered = (known after apply)
169 + template = <<~EOT
170 ${hostname} ansible_host=${host_ip}
171 EOT
172 + vars = {
173 + "host_ip" = (known after apply)
174 + "hostname" = "p002xknw101.nix.tech.altenar.net"
175 }
176 }
177
178 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[4] will be read during apply
179 # (config refers to values not yet known)
180 <= data "template_file" "kubespray_hosts_worker" {
181 + id = (known after apply)
182 + rendered = (known after apply)
183 + template = <<~EOT
184 ${hostname} ansible_host=${host_ip}
185 EOT
186 + vars = {
187 + "host_ip" = (known after apply)
188 + "hostname" = "p002xknw200.nix.tech.altenar.net"
189 }
190 }
191
192 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker[5] will be read during apply
193 # (config refers to values not yet known)
194 <= data "template_file" "kubespray_hosts_worker" {
195 + id = (known after apply)
196 + rendered = (known after apply)
197 + template = <<~EOT
198 ${hostname} ansible_host=${host_ip}
199 EOT
200 + vars = {
201 + "host_ip" = (known after apply)
202 + "hostname" = "p002xknw201.nix.tech.altenar.net"
203 }
204 }
205
206 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[0] will be read during apply
207 # (config refers to values not yet known)
208 <= data "template_file" "kubespray_hosts_worker_list" {
209 + id = (known after apply)
210 + rendered = (known after apply)
211 + template = <<~EOT
212 ${hostname}
213 EOT
214 + vars = {
215 + "hostname" = "p002xknw000.nix.tech.altenar.net"
216 }
217 }
218
219 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[1] will be read during apply
220 # (config refers to values not yet known)
221 <= data "template_file" "kubespray_hosts_worker_list" {
222 + id = (known after apply)
223 + rendered = (known after apply)
224 + template = <<~EOT
225 ${hostname}
226 EOT
227 + vars = {
228 + "hostname" = "p002xknw001.nix.tech.altenar.net"
229 }
230 }
231
232 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[2] will be read during apply
233 # (config refers to values not yet known)
234 <= data "template_file" "kubespray_hosts_worker_list" {
235 + id = (known after apply)
236 + rendered = (known after apply)
237 + template = <<~EOT
238 ${hostname}
239 EOT
240 + vars = {
241 + "hostname" = "p002xknw100.nix.tech.altenar.net"
242 }
243 }
244
245 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[3] will be read during apply
246 # (config refers to values not yet known)
247 <= data "template_file" "kubespray_hosts_worker_list" {
248 + id = (known after apply)
249 + rendered = (known after apply)
250 + template = <<~EOT
251 ${hostname}
252 EOT
253 + vars = {
254 + "hostname" = "p002xknw101.nix.tech.altenar.net"
255 }
256 }
257
258 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[4] will be read during apply
259 # (config refers to values not yet known)
260 <= data "template_file" "kubespray_hosts_worker_list" {
261 + id = (known after apply)
262 + rendered = (known after apply)
263 + template = <<~EOT
264 ${hostname}
265 EOT
266 + vars = {
267 + "hostname" = "p002xknw200.nix.tech.altenar.net"
268 }
269 }
270
271 # module.p002-sb2-k8s-nlam0.data.template_file.kubespray_hosts_worker_list[5] will be read during apply
272 # (config refers to values not yet known)
273 <= data "template_file" "kubespray_hosts_worker_list" {
274 + id = (known after apply)
275 + rendered = (known after apply)
276 + template = <<~EOT
277 ${hostname}
278 EOT
279 + vars = {
280 + "hostname" = "p002xknw201.nix.tech.altenar.net"
281 }
282 }
283
284 # module.p002-sb2-k8s-nlam0.local_file.kubespray_all will be created
285 + resource "local_file" "kubespray_all" {
286 + content = <<~EOT
287 ## Directory where etcd data stored
288 etcd_data_dir: /var/lib/etcd
289
290 ## Directory where the binaries will be installed
291 bin_dir: /usr/local/bin
292
293 ## The access_ip variable is used to define how other nodes should access
294 ## the node. This is used in flannel to allow other flannel nodes to see
    295         ## this node for example. The access_ip is really useful in AWS and Google
296 ## environments where the nodes are accessed remotely by the "public" ip,
297 ## but don't know about that address themselves.
298 #access_ip: 1.1.1.1
299
300
301 ## External LB example config
302 apiserver_loadbalancer_domain_name: "10.202.32.26"
303 loadbalancer_apiserver:
304 address: "10.202.32.26"
305 port: 6443
306
307 ## Internal loadbalancers for apiservers
308 loadbalancer_apiserver_localhost: true
309
310 ## Local loadbalancer should use this port instead, if defined.
311 ## Defaults to kube_apiserver_port (6443)
312 #nginx_kube_apiserver_port: 8443
313
314 ### OTHER OPTIONAL VARIABLES
315 ## For some things, kubelet needs to load kernel modules. For example, dynamic kernel services are needed
316 ## for mounting persistent volumes into containers. These may not be loaded by preinstall kubernetes
317 ## processes. For example, ceph and rbd backed volumes. Set to true to allow kubelet to load kernel
318 ## modules.
319 #kubelet_load_modules: false
320
321 ## Upstream dns servers used by dnsmasq
322 #upstream_dns_servers:
323 # - 8.8.8.8
324 # - 8.8.4.4
325
326 docker_dns_servers_strict: false
327
328 ## There are some changes specific to the cloud providers
329 ## for instance we need to encapsulate packets with some network plugins
330 ## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
331 ## When openstack is used make sure to source in the openstack credentials
332 ## like you would do when using nova-client before starting the playbook.
333 ## Note: The 'external' cloud provider is not supported.
334 ## TODO(riverzhang): https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
335 cloud_provider: "vsphere"
336 vsphere_vcenter_ip: "10.202.4.250"
337 vsphere_vcenter_port: 443
338 vsphere_insecure: 1
339 vsphere_user: "terraform@vsphere.local"
    340         vsphere_password: "[REDACTED — plaintext vCenter credential removed from shared log; rotate this password]"
341 vsphere_datacenter: "NLAM0C0"
342 vsphere_datastore: "ds-nlam0c0_clusA-vsan"
343 vsphere_working_dir: "P002"
344 vsphere_scsi_controller_type: "pvscsi"
345 vsphere_resource_pool: ""
346
347 ## kubeadm deployment mode
348 kubeadm_enabled: true
349
350 # Skip alert information
351 skip_non_kubeadm_warning: false
352
353 ## Set these proxy values in order to update package manager and docker daemon to use proxies
354 #http_proxy: ""
355 #https_proxy: ""
356
357 ## Refer to roles/kubespray-defaults/defaults/main.yml before modifying no_proxy
358 #no_proxy: ""
359
360 ## Some problems may occur when downloading files over https proxy due to ansible bug
361 ## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
362 ## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
363 #download_validate_certs: False
364
365 ## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
366 #additional_no_proxy: ""
367
368 ## Certificate Management
369 ## This setting determines whether certs are generated via scripts.
   370             ## Choose 'none' if you provide your own certificates.
371 ## Option is "script", "none"
372 ## note: vault is removed
373 #cert_management: script
374
375 ## Set to true to allow pre-checks to fail and continue deployment
376 #ignore_assert_errors: false
377
378 ## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
379 #kube_read_only_port: 10255
380
381 ## Set true to download and cache container
382 #download_container: true
383
384 ## Deploy container engine
385 # Set false if you want to deploy container engine manually.
386 #deploy_container_engine: true
387
388 ## Set Pypi repo and cert accordingly
389 #pyrepo_index: https://pypi.example.com/simple
390 #pyrepo_cert: /etc/ssl/certs/ca-certificates.crt
391
392 # Kubernetes dashboard
393 # RBAC required. see docs/getting-started.md for access details.
394 dashboard_enabled: true
395
396 # Helm deployment
397 helm_enabled: true
398
399 # Metrics Server deployment
400 metrics_server_enabled: true
401 metrics_server_kubelet_insecure_tls: true
402 metrics_server_metric_resolution: 60s
403 metrics_server_kubelet_preferred_address_types: "InternalIP"
404
405 EOT
406 + directory_permission = "0777"
407 + file_permission = "0777"
408 + filename = "config/group_vars/all.yml"
409 + id = (known after apply)
410 }
411
412 # module.p002-sb2-k8s-nlam0.local_file.kubespray_hosts will be created
413 + resource "local_file" "kubespray_hosts" {
414 + content = (known after apply)
415 + directory_permission = "0777"
416 + file_permission = "0777"
417 + filename = "config/hosts.ini"
418 + id = (known after apply)
419 }
420
421 # module.p002-sb2-k8s-nlam0.local_file.kubespray_k8s_cluster will be created
422 + resource "local_file" "kubespray_k8s_cluster" {
423 + content = <<~EOT
424 # Kubernetes configuration dirs and system namespace.
425 # Those are where all the additional config stuff goes
426 # the kubernetes normally puts in /srv/kubernetes.
427 # This puts them in a sane location and namespace.
428 # Editing those values will almost surely break something.
429 kube_config_dir: /etc/kubernetes
430 kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
431 kube_manifest_dir: "{{ kube_config_dir }}/manifests"
432
433 # This is where all the cert scripts and certs will be located
434 kube_cert_dir: "{{ kube_config_dir }}/ssl"
435
436 # This is where all of the bearer tokens will be stored
437 kube_token_dir: "{{ kube_config_dir }}/tokens"
438
439 # This is where to save basic auth file
440 kube_users_dir: "{{ kube_config_dir }}/users"
441
442 kube_api_anonymous_auth: true
443
444 ## Change this to use another Kubernetes version, e.g. a current beta release
445 kube_version: 1.14.16
446
447 # kubernetes image repo define
448 kube_image_repo: "gcr.io/google-containers"
449
450 # Where the binaries will be downloaded.
451 # Note: ensure that you've enough disk space (about 1G)
452 local_release_dir: "/tmp/releases"
453 # Random shifts for retrying failed ops like pushing/downloading
454 retry_stagger: 5
455
456 # This is the group that the cert creation scripts chgrp the
457 # cert files to. Not really changeable...
458 kube_cert_group: kube-cert
459
460 # Cluster Loglevel configuration
461 kube_log_level: 2
462
463 # Directory where credentials will be stored
464 credentials_dir: "{{ inventory_dir }}/credentials"
465
466 # Users to create for basic auth in Kubernetes API via HTTP
467 # Optionally add groups for user
468 kube_api_pwd: "{{ lookup('password', credentials_dir + '/kube_user.creds length=15 chars=ascii_letters,digits') }}"
469 kube_users:
470 kube:
471 pass: "{{kube_api_pwd}}"
472 role: admin
473 groups:
474 - system:masters
475
476 ## It is possible to activate / deactivate selected authentication methods (basic auth, static token auth)
477 #kube_oidc_auth: false
478 #kube_basic_auth: false
479 #kube_token_auth: false
480
481
482 ## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/
483 ## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...)
484
485 # kube_oidc_url: https:// ...
486 # kube_oidc_client_id: kubernetes
487 ## Optional settings for OIDC
488 # kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem"
489 # kube_oidc_username_claim: sub
490 # kube_oidc_username_prefix: oidc:
491 # kube_oidc_groups_claim: groups
492 # kube_oidc_groups_prefix: oidc:
493
494
495 # Choose network plugin (cilium, calico, contiv, weave or flannel)
496 # Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
497 kube_network_plugin: calico
498
499 # Kubernetes internal network for services, unused block of space.
500 kube_service_addresses: 10.206.0.0/22
501
502 # internal network. When used, it will assign IP
503 # addresses from this range to individual pods.
504 # This network must be unused in your network infrastructure!
505 kube_pods_subnet: 10.206.32.0/19
506
507 # internal network node size allocation (optional). This is the size allocated
508 # to each node on your network. With these defaults you should have
509 # room for 4096 nodes with 254 pods per node.
510 kube_network_node_prefix: 24
511
512 # The port the API Server will be listening on.
513 kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
514 kube_apiserver_port: 6443 # (https)
515 #kube_apiserver_insecure_port: 8080 # (http)
516 # Set to 0 to disable insecure port - Requires RBAC in authorization_modes and kube_api_anonymous_auth: true
517 kube_apiserver_insecure_port: 0 # (disabled)
518
519 # Kube-proxy proxyMode configuration.
520 # Can be ipvs, iptables
521 kube_proxy_mode: ipvs
522
523 # A string slice of values which specify the addresses to use for NodePorts.
524 # Values may be valid IP blocks (e.g. 1.2.3.0/24, 1.2.3.4/32).
525 # The default empty string slice ([]) means to use all local addresses.
526 # kube_proxy_nodeport_addresses_cidr is retained for legacy config
527 kube_proxy_nodeport_addresses: >-
528 {%- if kube_proxy_nodeport_addresses_cidr is defined -%}
529 [{{ kube_proxy_nodeport_addresses_cidr }}]
530 {%- else -%}
531 []
532 {%- endif -%}
533
534 ## Encrypting Secret Data at Rest (experimental)
535 kube_encrypt_secret_data: false
536
537 # DNS configuration.
538 # Kubernetes cluster name, also will be used as DNS domain
539 cluster_name: cluster.local
540 # Subdomains of DNS domain to be resolved via /etc/resolv.conf for hostnet pods
541 ndots: 2
542 # Can be dnsmasq_kubedns, kubedns, coredns, coredns_dual, manual or none
543 dns_mode: coredns
544 # Set manual server if using a custom cluster DNS server
545 #manual_dns_server: 10.x.x.x
546
547 # Can be docker_dns, host_resolvconf or none
548 resolvconf_mode: docker_dns
549 # Deploy netchecker app to verify DNS resolve as an HTTP service
550 deploy_netchecker: false
551 # Ip address of the kubernetes skydns service
552 skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
553 skydns_server_secondary: "{{ kube_service_addresses|ipaddr('net')|ipaddr(4)|ipaddr('address') }}"
554 dnsmasq_dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"
555 dns_domain: "{{ cluster_name }}"
556
557 ## Container runtime
558 ## docker for docker and crio for cri-o.
559 container_manager: docker
560
561 ## Settings for containerized control plane (etcd/kubelet/secrets)
562 etcd_deployment_type: docker
563 kubelet_deployment_type: host
564 helm_deployment_type: host
565
566 # K8s image pull policy (imagePullPolicy)
567 k8s_image_pull_policy: Always
568
569 # audit log for kubernetes
570 kubernetes_audit: true
571
572 # dynamic kubelet configuration
573 dynamic_kubelet_configuration: false
574
575 # define kubelet config dir for dynamic kubelet
576 #kubelet_config_dir:
577 default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir"
578 dynamic_kubelet_configuration_dir: "{{ kubelet_config_dir | default(default_kubelet_config_dir) }}"
579
580 # pod security policy (RBAC must be enabled either by having 'RBAC' in authorization_modes or kubeadm enabled)
581 podsecuritypolicy_enabled: true
582
583 # Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts
584 kubeconfig_localhost: true
585 # Download kubectl onto the host that runs Ansible in {{ bin_dir }}
586 kubectl_localhost: true
587
588 # dnsmasq
589 # dnsmasq_upstream_dns_servers:
590 # - /resolvethiszone.with/10.0.4.250
591 # - 8.8.8.8
592
593 # Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created. (default true)
594 # kubelet_cgroups_per_qos: true
595
596 # A comma separated list of levels of node allocatable enforcement to be enforced by kubelet.
597 # Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "".
598 # kubelet_enforce_node_allocatable: pods
599
600 ## Supplementary addresses that can be added in kubernetes ssl keys.
601 ## That can be useful for example to setup a keepalived virtual IP
602 # supplementary_addresses_in_ssl_keys:
603
604 ## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler.
605 ## See https://github.com/kubernetes-sigs/kubespray/issues/2141
606 ## Set this variable to true to get rid of this issue
607 volume_cross_zone_attachment: false
608 # Add Persistent Volumes Storage Class for corresponding cloud provider ( OpenStack is only supported now )
609 persistent_volumes_enabled: false
610
611 ## Container Engine Acceleration
612 ## Enable container acceleration feature, for example use gpu acceleration in containers
613 # nvidia_accelerator_enabled: true
614 ## Nvidia GPU driver install. Install will by done by a (init) pod running as a daemonset.
615 ## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
   616             ## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
617 ## Labels and taints won't be set to nodes if they are not in the array.
618 # nvidia_gpu_nodes:
619 # - kube-gpu-001
620 # nvidia_driver_version: "384.111"
621 ## flavor can be tesla or gtx
622 # nvidia_gpu_flavor: gtx
623
624 # DOPS-1185
625 # https://github.com/zalando-incubator/kubernetes-on-aws/issues/1026
626 # Disable CPU CFQ quota to avoid CPU throttling in Guaranteed QoS class
627 # Enable 'static' CPU manager policy to bind containers to CPU cores
628
629 kubelet_node_custom_flags:
630 - "--cpu-cfs-quota=false"
631 - "--cpu-manager-policy=static"
632
633 # DOPS-1172
634 # Enable PodPreset admission plugin
635
636 kube_apiserver_enable_admission_plugins:
637 - "PodPreset"
638
639 kube_api_runtime_config:
640 - "settings.k8s.io/v1alpha1=true"
641 EOT
642 + directory_permission = "0777"
643 + file_permission = "0777"
644 + filename = "config/group_vars/k8s-cluster.yml"
645 + id = (known after apply)
646 }
647
648 # module.p002-sb2-k8s-nlam0.local_file.metallb_config will be created
649 + resource "local_file" "metallb_config" {
650 + content = <<~EOT
651 apiVersion: v1
652 kind: ConfigMap
653 metadata:
654 namespace: kube-system
655 name: metallb-config
656 data:
657 config: |
658 address-pools:
659 - name: default
660 protocol: layer2
661 addresses:
662 - 10.202.40.101-10.202.40.140
663 EOT
664 + directory_permission = "0777"
665 + file_permission = "0777"
666 + filename = "yaml/metallb_config.yaml"
667 + id = (known after apply)
668 }
669
670 # module.p002-sb2-k8s-nlam0.null_resource.config_permission will be created
671 + resource "null_resource" "config_permission" {
672 + id = (known after apply)
673 }
674
675 # module.p002-sb2-k8s-nlam0.null_resource.gcr will be created
676 + resource "null_resource" "gcr" {
677 + id = (known after apply)
678 }
679
680 # module.p002-sb2-k8s-nlam0.null_resource.helm-tiller will be created
681 + resource "null_resource" "helm-tiller" {
682 + id = (known after apply)
683 }
684
685 # module.p002-sb2-k8s-nlam0.null_resource.kubespray_create[0] will be created
686 + resource "null_resource" "kubespray_create" {
687 + id = (known after apply)
688 }
689
690 # module.p002-sb2-k8s-nlam0.null_resource.kubespray_download will be created
691 + resource "null_resource" "kubespray_download" {
692 + id = (known after apply)
693 }
694
695 # module.p002-sb2-k8s-nlam0.null_resource.metallb will be created
696 + resource "null_resource" "metallb" {
697 + id = (known after apply)
698 }
699
700 # module.p002-sb2-k8s-nlam0.null_resource.metallb_config will be created
701 + resource "null_resource" "metallb_config" {
702 + id = (known after apply)
703 }
704
705 # module.p002-sb2-k8s-nlam0.null_resource.voyager will be created
706 + resource "null_resource" "voyager" {
707 + id = (known after apply)
708 }
709
710 # module.p002-sb2-k8s-nlam0.null_resource.voyager-ready will be created
711 + resource "null_resource" "voyager-ready" {
712 + id = (known after apply)
713 }
714
715 # module.p002-sb2-k8s-nlam0.null_resource.voyager_psp will be created
716 + resource "null_resource" "voyager_psp" {
717 + id = (known after apply)
718 }
719
720 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_master_anti_affinity_rule_cluster_a will be created
721 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_master_anti_affinity_rule_cluster_a" {
722 + compute_cluster_id = "domain-c891"
723 + enabled = true
724 + id = (known after apply)
725 + name = "p002_k8s_master_anti_affinity_rule_cluster_a"
726 + virtual_machine_ids = (known after apply)
727 }
728
729 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_master_anti_affinity_rule_cluster_b will be created
730 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_master_anti_affinity_rule_cluster_b" {
731 + compute_cluster_id = "domain-c950"
732 + enabled = true
733 + id = (known after apply)
734 + name = "p002_k8s_master_anti_affinity_rule_cluster_b"
735 + virtual_machine_ids = (known after apply)
736 }
737
738 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_master_anti_affinity_rule_cluster_c will be created
739 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_master_anti_affinity_rule_cluster_c" {
740 + compute_cluster_id = "domain-c1033"
741 + enabled = true
742 + id = (known after apply)
743 + name = "p002_k8s_master_anti_affinity_rule_cluster_c"
744 + virtual_machine_ids = (known after apply)
745 }
746
747 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_worker_anti_affinity_rule_cluster_a will be created
748 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_worker_anti_affinity_rule_cluster_a" {
749 + compute_cluster_id = "domain-c891"
750 + enabled = true
751 + id = (known after apply)
752 + name = "p002_k8s_worker_anti_affinity_rule_cluster_a"
753 + virtual_machine_ids = (known after apply)
754 }
755
756 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_worker_anti_affinity_rule_cluster_b will be created
757 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_worker_anti_affinity_rule_cluster_b" {
758 + compute_cluster_id = "domain-c950"
759 + enabled = true
760 + id = (known after apply)
761 + name = "p002_k8s_worker_anti_affinity_rule_cluster_b"
762 + virtual_machine_ids = (known after apply)
763 }
764
765 # module.p002-sb2-k8s-nlam0.vsphere_compute_cluster_vm_anti_affinity_rule.vm_worker_anti_affinity_rule_cluster_c will be created
766 + resource "vsphere_compute_cluster_vm_anti_affinity_rule" "vm_worker_anti_affinity_rule_cluster_c" {
767 + compute_cluster_id = "domain-c1033"
768 + enabled = true
769 + id = (known after apply)
770 + name = "p002_k8s_worker_anti_affinity_rule_cluster_c"
771 + virtual_machine_ids = (known after apply)
772 }
773
774 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_master_a[0] will be created
775 + resource "vsphere_virtual_machine" "vm_master_a" {
776 + boot_retry_delay = 10000
777 + change_version = (known after apply)
778 + cpu_limit = -1
779 + cpu_share_count = (known after apply)
780 + cpu_share_level = "normal"
781 + datastore_id = "datastore-943"
782 + default_ip_address = (known after apply)
783 + enable_disk_uuid = true
784 + ept_rvi_mode = "automatic"
785 + firmware = "bios"
786 + folder = "P002"
787 + force_power_off = true
788 + guest_id = "rhel7_64Guest"
789 + guest_ip_addresses = (known after apply)
790 + host_system_id = (known after apply)
791 + hv_mode = "hvAuto"
792 + id = (known after apply)
793 + imported = (known after apply)
794 + latency_sensitivity = "normal"
795 + memory = 8192
796 + memory_limit = -1
797 + memory_share_count = (known after apply)
798 + memory_share_level = "normal"
799 + migrate_wait_timeout = 30
800 + moid = (known after apply)
801 + name = "p002xknm000.nix.tech.altenar.net"
802 + num_cores_per_socket = 2
803 + num_cpus = 4
804 + reboot_required = (known after apply)
805 + resource_pool_id = "resgroup-892"
806 + run_tools_scripts_after_power_on = true
807 + run_tools_scripts_after_resume = true
808 + run_tools_scripts_before_guest_shutdown = true
809 + run_tools_scripts_before_guest_standby = true
810 + scsi_bus_sharing = "noSharing"
811 + scsi_controller_count = 1
812 + scsi_type = "pvscsi"
813 + shutdown_wait_timeout = 3
814 + swap_placement_policy = "inherit"
815 + uuid = (known after apply)
816 + vapp_transport = (known after apply)
817 + vmware_tools_status = (known after apply)
818 + vmx_path = (known after apply)
819 + wait_for_guest_ip_timeout = 0
820 + wait_for_guest_net_routable = true
821 + wait_for_guest_net_timeout = 5
822
823 + clone {
824 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
825 + timeout = 120
826
827 + customize {
828 + dns_server_list = [
829 + "10.202.8.160",
830 + "10.202.8.161",
831 ]
832 + ipv4_gateway = "10.202.40.254"
833 + timeout = 40
834
835 + linux_options {
836 + domain = "nix.tech.altenar.net"
837 + host_name = "p002xknm000"
838 + hw_clock_utc = true
839 }
840
841 + network_interface {
842 + ipv4_address = "10.202.40.10"
843 + ipv4_netmask = 24
844 }
845 }
846 }
847
848 + disk {
849 + attach = false
850 + datastore_id = "<computed>"
851 + device_address = (known after apply)
852 + disk_mode = "persistent"
853 + disk_sharing = "sharingNone"
854 + eagerly_scrub = false
855 + io_limit = -1
856 + io_reservation = 0
857 + io_share_count = 0
858 + io_share_level = "normal"
859 + keep_on_remove = false
860 + key = 0
861 + label = "disk0"
862 + path = (known after apply)
863 + size = 200
864 + thin_provisioned = true
865 + unit_number = 0
866 + uuid = (known after apply)
867 + write_through = false
868 }
869
870 + network_interface {
871 + adapter_type = "vmxnet3"
872 + bandwidth_limit = -1
873 + bandwidth_reservation = 0
874 + bandwidth_share_count = (known after apply)
875 + bandwidth_share_level = "normal"
876 + device_address = (known after apply)
877 + key = (known after apply)
878 + mac_address = (known after apply)
879 + network_id = "dvportgroup-1663"
880 }
881 }
882
883 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_master_b[0] will be created
884 + resource "vsphere_virtual_machine" "vm_master_b" {
885 + boot_retry_delay = 10000
886 + change_version = (known after apply)
887 + cpu_limit = -1
888 + cpu_share_count = (known after apply)
889 + cpu_share_level = "normal"
890 + datastore_id = "datastore-968"
891 + default_ip_address = (known after apply)
892 + enable_disk_uuid = true
893 + ept_rvi_mode = "automatic"
894 + firmware = "bios"
895 + folder = "P002"
896 + force_power_off = true
897 + guest_id = "rhel7_64Guest"
898 + guest_ip_addresses = (known after apply)
899 + host_system_id = (known after apply)
900 + hv_mode = "hvAuto"
901 + id = (known after apply)
902 + imported = (known after apply)
903 + latency_sensitivity = "normal"
904 + memory = 8192
905 + memory_limit = -1
906 + memory_share_count = (known after apply)
907 + memory_share_level = "normal"
908 + migrate_wait_timeout = 30
909 + moid = (known after apply)
910 + name = "p002xknm100.nix.tech.altenar.net"
911 + num_cores_per_socket = 2
912 + num_cpus = 4
913 + reboot_required = (known after apply)
914 + resource_pool_id = "resgroup-951"
915 + run_tools_scripts_after_power_on = true
916 + run_tools_scripts_after_resume = true
917 + run_tools_scripts_before_guest_shutdown = true
918 + run_tools_scripts_before_guest_standby = true
919 + scsi_bus_sharing = "noSharing"
920 + scsi_controller_count = 1
921 + scsi_type = "pvscsi"
922 + shutdown_wait_timeout = 3
923 + swap_placement_policy = "inherit"
924 + uuid = (known after apply)
925 + vapp_transport = (known after apply)
926 + vmware_tools_status = (known after apply)
927 + vmx_path = (known after apply)
928 + wait_for_guest_ip_timeout = 0
929 + wait_for_guest_net_routable = true
930 + wait_for_guest_net_timeout = 5
931
932 + clone {
933 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
934 + timeout = 120
935
936 + customize {
937 + dns_server_list = [
938 + "10.202.8.160",
939 + "10.202.8.161",
940 ]
941 + ipv4_gateway = "10.202.40.254"
942 + timeout = 40
943
944 + linux_options {
945 + domain = "nix.tech.altenar.net"
946 + host_name = "p002xknm100"
947 + hw_clock_utc = true
948 }
949
950 + network_interface {
951 + ipv4_address = "10.202.40.11"
952 + ipv4_netmask = 24
953 }
954 }
955 }
956
957 + disk {
958 + attach = false
959 + datastore_id = "<computed>"
960 + device_address = (known after apply)
961 + disk_mode = "persistent"
962 + disk_sharing = "sharingNone"
963 + eagerly_scrub = false
964 + io_limit = -1
965 + io_reservation = 0
966 + io_share_count = 0
967 + io_share_level = "normal"
968 + keep_on_remove = false
969 + key = 0
970 + label = "disk0"
971 + path = (known after apply)
972 + size = 200
973 + thin_provisioned = true
974 + unit_number = 0
975 + uuid = (known after apply)
976 + write_through = false
977 }
978
979 + network_interface {
980 + adapter_type = "vmxnet3"
981 + bandwidth_limit = -1
982 + bandwidth_reservation = 0
983 + bandwidth_share_count = (known after apply)
984 + bandwidth_share_level = "normal"
985 + device_address = (known after apply)
986 + key = (known after apply)
987 + mac_address = (known after apply)
988 + network_id = "dvportgroup-1663"
989 }
990 }
991
992 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_master_c[0] will be created
993 + resource "vsphere_virtual_machine" "vm_master_c" {
994 + boot_retry_delay = 10000
995 + change_version = (known after apply)
996 + cpu_limit = -1
997 + cpu_share_count = (known after apply)
998 + cpu_share_level = "normal"
999 + datastore_id = "datastore-1035"
1000 + default_ip_address = (known after apply)
1001 + enable_disk_uuid = true
1002 + ept_rvi_mode = "automatic"
1003 + firmware = "bios"
1004 + folder = "P002"
1005 + force_power_off = true
1006 + guest_id = "rhel7_64Guest"
1007 + guest_ip_addresses = (known after apply)
1008 + host_system_id = (known after apply)
1009 + hv_mode = "hvAuto"
1010 + id = (known after apply)
1011 + imported = (known after apply)
1012 + latency_sensitivity = "normal"
1013 + memory = 8192
1014 + memory_limit = -1
1015 + memory_share_count = (known after apply)
1016 + memory_share_level = "normal"
1017 + migrate_wait_timeout = 30
1018 + moid = (known after apply)
1019 + name = "p002xknm200.nix.tech.altenar.net"
1020 + num_cores_per_socket = 2
1021 + num_cpus = 4
1022 + reboot_required = (known after apply)
1023 + resource_pool_id = "resgroup-1034"
1024 + run_tools_scripts_after_power_on = true
1025 + run_tools_scripts_after_resume = true
1026 + run_tools_scripts_before_guest_shutdown = true
1027 + run_tools_scripts_before_guest_standby = true
1028 + scsi_bus_sharing = "noSharing"
1029 + scsi_controller_count = 1
1030 + scsi_type = "pvscsi"
1031 + shutdown_wait_timeout = 3
1032 + swap_placement_policy = "inherit"
1033 + uuid = (known after apply)
1034 + vapp_transport = (known after apply)
1035 + vmware_tools_status = (known after apply)
1036 + vmx_path = (known after apply)
1037 + wait_for_guest_ip_timeout = 0
1038 + wait_for_guest_net_routable = true
1039 + wait_for_guest_net_timeout = 5
1040
1041 + clone {
1042 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1043 + timeout = 120
1044
1045 + customize {
1046 + dns_server_list = [
1047 + "10.202.8.160",
1048 + "10.202.8.161",
1049 ]
1050 + ipv4_gateway = "10.202.40.254"
1051 + timeout = 40
1052
1053 + linux_options {
1054 + domain = "nix.tech.altenar.net"
1055 + host_name = "p002xknm200"
1056 + hw_clock_utc = true
1057 }
1058
1059 + network_interface {
1060 + ipv4_address = "10.202.40.12"
1061 + ipv4_netmask = 24
1062 }
1063 }
1064 }
1065
1066 + disk {
1067 + attach = false
1068 + datastore_id = "<computed>"
1069 + device_address = (known after apply)
1070 + disk_mode = "persistent"
1071 + disk_sharing = "sharingNone"
1072 + eagerly_scrub = false
1073 + io_limit = -1
1074 + io_reservation = 0
1075 + io_share_count = 0
1076 + io_share_level = "normal"
1077 + keep_on_remove = false
1078 + key = 0
1079 + label = "disk0"
1080 + path = (known after apply)
1081 + size = 200
1082 + thin_provisioned = true
1083 + unit_number = 0
1084 + uuid = (known after apply)
1085 + write_through = false
1086 }
1087
1088 + network_interface {
1089 + adapter_type = "vmxnet3"
1090 + bandwidth_limit = -1
1091 + bandwidth_reservation = 0
1092 + bandwidth_share_count = (known after apply)
1093 + bandwidth_share_level = "normal"
1094 + device_address = (known after apply)
1095 + key = (known after apply)
1096 + mac_address = (known after apply)
1097 + network_id = "dvportgroup-1663"
1098 }
1099 }
1100
1101 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_a[0] will be created
1102 + resource "vsphere_virtual_machine" "vm_worker_a" {
1103 + boot_retry_delay = 10000
1104 + change_version = (known after apply)
1105 + cpu_limit = -1
1106 + cpu_share_count = (known after apply)
1107 + cpu_share_level = "normal"
1108 + datastore_id = "datastore-943"
1109 + default_ip_address = (known after apply)
1110 + enable_disk_uuid = true
1111 + ept_rvi_mode = "automatic"
1112 + firmware = "bios"
1113 + folder = "P002"
1114 + force_power_off = true
1115 + guest_id = "rhel7_64Guest"
1116 + guest_ip_addresses = (known after apply)
1117 + host_system_id = (known after apply)
1118 + hv_mode = "hvAuto"
1119 + id = (known after apply)
1120 + imported = (known after apply)
1121 + latency_sensitivity = "normal"
1122 + memory = 16384
1123 + memory_limit = -1
1124 + memory_share_count = (known after apply)
1125 + memory_share_level = "normal"
1126 + migrate_wait_timeout = 30
1127 + moid = (known after apply)
1128 + name = "p002xknw000.nix.tech.altenar.net"
1129 + num_cores_per_socket = 2
1130 + num_cpus = 8
1131 + reboot_required = (known after apply)
1132 + resource_pool_id = "resgroup-892"
1133 + run_tools_scripts_after_power_on = true
1134 + run_tools_scripts_after_resume = true
1135 + run_tools_scripts_before_guest_shutdown = true
1136 + run_tools_scripts_before_guest_standby = true
1137 + scsi_bus_sharing = "noSharing"
1138 + scsi_controller_count = 1
1139 + scsi_type = "pvscsi"
1140 + shutdown_wait_timeout = 3
1141 + swap_placement_policy = "inherit"
1142 + uuid = (known after apply)
1143 + vapp_transport = (known after apply)
1144 + vmware_tools_status = (known after apply)
1145 + vmx_path = (known after apply)
1146 + wait_for_guest_ip_timeout = 0
1147 + wait_for_guest_net_routable = true
1148 + wait_for_guest_net_timeout = 5
1149
1150 + clone {
1151 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1152 + timeout = 120
1153
1154 + customize {
1155 + dns_server_list = [
1156 + "10.202.8.160",
1157 + "10.202.8.161",
1158 ]
1159 + ipv4_gateway = "10.202.40.254"
1160 + timeout = 40
1161
1162 + linux_options {
1163 + domain = "nix.tech.altenar.net"
1164 + host_name = "p002xknw000"
1165 + hw_clock_utc = true
1166 }
1167
1168 + network_interface {
1169 + ipv4_address = "10.202.40.20"
1170 + ipv4_netmask = 24
1171 }
1172 }
1173 }
1174
1175 + disk {
1176 + attach = false
1177 + datastore_id = "<computed>"
1178 + device_address = (known after apply)
1179 + disk_mode = "persistent"
1180 + disk_sharing = "sharingNone"
1181 + eagerly_scrub = false
1182 + io_limit = -1
1183 + io_reservation = 0
1184 + io_share_count = 0
1185 + io_share_level = "normal"
1186 + keep_on_remove = false
1187 + key = 0
1188 + label = "disk0"
1189 + path = (known after apply)
1190 + size = 200
1191 + thin_provisioned = true
1192 + unit_number = 0
1193 + uuid = (known after apply)
1194 + write_through = false
1195 }
1196
1197 + network_interface {
1198 + adapter_type = "vmxnet3"
1199 + bandwidth_limit = -1
1200 + bandwidth_reservation = 0
1201 + bandwidth_share_count = (known after apply)
1202 + bandwidth_share_level = "normal"
1203 + device_address = (known after apply)
1204 + key = (known after apply)
1205 + mac_address = (known after apply)
1206 + network_id = "dvportgroup-1663"
1207 }
1208 }
1209
1210 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_a[1] will be created
1211 + resource "vsphere_virtual_machine" "vm_worker_a" {
1212 + boot_retry_delay = 10000
1213 + change_version = (known after apply)
1214 + cpu_limit = -1
1215 + cpu_share_count = (known after apply)
1216 + cpu_share_level = "normal"
1217 + datastore_id = "datastore-943"
1218 + default_ip_address = (known after apply)
1219 + enable_disk_uuid = true
1220 + ept_rvi_mode = "automatic"
1221 + firmware = "bios"
1222 + folder = "P002"
1223 + force_power_off = true
1224 + guest_id = "rhel7_64Guest"
1225 + guest_ip_addresses = (known after apply)
1226 + host_system_id = (known after apply)
1227 + hv_mode = "hvAuto"
1228 + id = (known after apply)
1229 + imported = (known after apply)
1230 + latency_sensitivity = "normal"
1231 + memory = 16384
1232 + memory_limit = -1
1233 + memory_share_count = (known after apply)
1234 + memory_share_level = "normal"
1235 + migrate_wait_timeout = 30
1236 + moid = (known after apply)
1237 + name = "p002xknw001.nix.tech.altenar.net"
1238 + num_cores_per_socket = 2
1239 + num_cpus = 8
1240 + reboot_required = (known after apply)
1241 + resource_pool_id = "resgroup-892"
1242 + run_tools_scripts_after_power_on = true
1243 + run_tools_scripts_after_resume = true
1244 + run_tools_scripts_before_guest_shutdown = true
1245 + run_tools_scripts_before_guest_standby = true
1246 + scsi_bus_sharing = "noSharing"
1247 + scsi_controller_count = 1
1248 + scsi_type = "pvscsi"
1249 + shutdown_wait_timeout = 3
1250 + swap_placement_policy = "inherit"
1251 + uuid = (known after apply)
1252 + vapp_transport = (known after apply)
1253 + vmware_tools_status = (known after apply)
1254 + vmx_path = (known after apply)
1255 + wait_for_guest_ip_timeout = 0
1256 + wait_for_guest_net_routable = true
1257 + wait_for_guest_net_timeout = 5
1258
1259 + clone {
1260 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1261 + timeout = 120
1262
1263 + customize {
1264 + dns_server_list = [
1265 + "10.202.8.160",
1266 + "10.202.8.161",
1267 ]
1268 + ipv4_gateway = "10.202.40.254"
1269 + timeout = 40
1270
1271 + linux_options {
1272 + domain = "nix.tech.altenar.net"
1273 + host_name = "p002xknw001"
1274 + hw_clock_utc = true
1275 }
1276
1277 + network_interface {
1278 + ipv4_address = "10.202.40.23"
1279 + ipv4_netmask = 24
1280 }
1281 }
1282 }
1283
1284 + disk {
1285 + attach = false
1286 + datastore_id = "<computed>"
1287 + device_address = (known after apply)
1288 + disk_mode = "persistent"
1289 + disk_sharing = "sharingNone"
1290 + eagerly_scrub = false
1291 + io_limit = -1
1292 + io_reservation = 0
1293 + io_share_count = 0
1294 + io_share_level = "normal"
1295 + keep_on_remove = false
1296 + key = 0
1297 + label = "disk0"
1298 + path = (known after apply)
1299 + size = 200
1300 + thin_provisioned = true
1301 + unit_number = 0
1302 + uuid = (known after apply)
1303 + write_through = false
1304 }
1305
1306 + network_interface {
1307 + adapter_type = "vmxnet3"
1308 + bandwidth_limit = -1
1309 + bandwidth_reservation = 0
1310 + bandwidth_share_count = (known after apply)
1311 + bandwidth_share_level = "normal"
1312 + device_address = (known after apply)
1313 + key = (known after apply)
1314 + mac_address = (known after apply)
1315 + network_id = "dvportgroup-1663"
1316 }
1317 }
1318
1319 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_b[0] will be created
1320 + resource "vsphere_virtual_machine" "vm_worker_b" {
1321 + boot_retry_delay = 10000
1322 + change_version = (known after apply)
1323 + cpu_limit = -1
1324 + cpu_share_count = (known after apply)
1325 + cpu_share_level = "normal"
1326 + datastore_id = "datastore-968"
1327 + default_ip_address = (known after apply)
1328 + enable_disk_uuid = true
1329 + ept_rvi_mode = "automatic"
1330 + firmware = "bios"
1331 + folder = "P002"
1332 + force_power_off = true
1333 + guest_id = "rhel7_64Guest"
1334 + guest_ip_addresses = (known after apply)
1335 + host_system_id = (known after apply)
1336 + hv_mode = "hvAuto"
1337 + id = (known after apply)
1338 + imported = (known after apply)
1339 + latency_sensitivity = "normal"
1340 + memory = 16384
1341 + memory_limit = -1
1342 + memory_share_count = (known after apply)
1343 + memory_share_level = "normal"
1344 + migrate_wait_timeout = 30
1345 + moid = (known after apply)
1346 + name = "p002xknw100.nix.tech.altenar.net"
1347 + num_cores_per_socket = 2
1348 + num_cpus = 8
1349 + reboot_required = (known after apply)
1350 + resource_pool_id = "resgroup-951"
1351 + run_tools_scripts_after_power_on = true
1352 + run_tools_scripts_after_resume = true
1353 + run_tools_scripts_before_guest_shutdown = true
1354 + run_tools_scripts_before_guest_standby = true
1355 + scsi_bus_sharing = "noSharing"
1356 + scsi_controller_count = 1
1357 + scsi_type = "pvscsi"
1358 + shutdown_wait_timeout = 3
1359 + swap_placement_policy = "inherit"
1360 + uuid = (known after apply)
1361 + vapp_transport = (known after apply)
1362 + vmware_tools_status = (known after apply)
1363 + vmx_path = (known after apply)
1364 + wait_for_guest_ip_timeout = 0
1365 + wait_for_guest_net_routable = true
1366 + wait_for_guest_net_timeout = 5
1367
1368 + clone {
1369 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1370 + timeout = 120
1371
1372 + customize {
1373 + dns_server_list = [
1374 + "10.202.8.160",
1375 + "10.202.8.161",
1376 ]
1377 + ipv4_gateway = "10.202.40.254"
1378 + timeout = 40
1379
1380 + linux_options {
1381 + domain = "nix.tech.altenar.net"
1382 + host_name = "p002xknw100"
1383 + hw_clock_utc = true
1384 }
1385
1386 + network_interface {
1387 + ipv4_address = "10.202.40.21"
1388 + ipv4_netmask = 24
1389 }
1390 }
1391 }
1392
1393 + disk {
1394 + attach = false
1395 + datastore_id = "<computed>"
1396 + device_address = (known after apply)
1397 + disk_mode = "persistent"
1398 + disk_sharing = "sharingNone"
1399 + eagerly_scrub = false
1400 + io_limit = -1
1401 + io_reservation = 0
1402 + io_share_count = 0
1403 + io_share_level = "normal"
1404 + keep_on_remove = false
1405 + key = 0
1406 + label = "disk0"
1407 + path = (known after apply)
1408 + size = 200
1409 + thin_provisioned = true
1410 + unit_number = 0
1411 + uuid = (known after apply)
1412 + write_through = false
1413 }
1414
1415 + network_interface {
1416 + adapter_type = "vmxnet3"
1417 + bandwidth_limit = -1
1418 + bandwidth_reservation = 0
1419 + bandwidth_share_count = (known after apply)
1420 + bandwidth_share_level = "normal"
1421 + device_address = (known after apply)
1422 + key = (known after apply)
1423 + mac_address = (known after apply)
1424 + network_id = "dvportgroup-1663"
1425 }
1426 }
1427
1428 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_b[1] will be created
1429 + resource "vsphere_virtual_machine" "vm_worker_b" {
1430 + boot_retry_delay = 10000
1431 + change_version = (known after apply)
1432 + cpu_limit = -1
1433 + cpu_share_count = (known after apply)
1434 + cpu_share_level = "normal"
1435 + datastore_id = "datastore-968"
1436 + default_ip_address = (known after apply)
1437 + enable_disk_uuid = true
1438 + ept_rvi_mode = "automatic"
1439 + firmware = "bios"
1440 + folder = "P002"
1441 + force_power_off = true
1442 + guest_id = "rhel7_64Guest"
1443 + guest_ip_addresses = (known after apply)
1444 + host_system_id = (known after apply)
1445 + hv_mode = "hvAuto"
1446 + id = (known after apply)
1447 + imported = (known after apply)
1448 + latency_sensitivity = "normal"
1449 + memory = 16384
1450 + memory_limit = -1
1451 + memory_share_count = (known after apply)
1452 + memory_share_level = "normal"
1453 + migrate_wait_timeout = 30
1454 + moid = (known after apply)
1455 + name = "p002xknw101.nix.tech.altenar.net"
1456 + num_cores_per_socket = 2
1457 + num_cpus = 8
1458 + reboot_required = (known after apply)
1459 + resource_pool_id = "resgroup-951"
1460 + run_tools_scripts_after_power_on = true
1461 + run_tools_scripts_after_resume = true
1462 + run_tools_scripts_before_guest_shutdown = true
1463 + run_tools_scripts_before_guest_standby = true
1464 + scsi_bus_sharing = "noSharing"
1465 + scsi_controller_count = 1
1466 + scsi_type = "pvscsi"
1467 + shutdown_wait_timeout = 3
1468 + swap_placement_policy = "inherit"
1469 + uuid = (known after apply)
1470 + vapp_transport = (known after apply)
1471 + vmware_tools_status = (known after apply)
1472 + vmx_path = (known after apply)
1473 + wait_for_guest_ip_timeout = 0
1474 + wait_for_guest_net_routable = true
1475 + wait_for_guest_net_timeout = 5
1476
1477 + clone {
1478 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1479 + timeout = 120
1480
1481 + customize {
1482 + dns_server_list = [
1483 + "10.202.8.160",
1484 + "10.202.8.161",
1485 ]
1486 + ipv4_gateway = "10.202.40.254"
1487 + timeout = 40
1488
1489 + linux_options {
1490 + domain = "nix.tech.altenar.net"
1491 + host_name = "p002xknw101"
1492 + hw_clock_utc = true
1493 }
1494
1495 + network_interface {
1496 + ipv4_address = "10.202.40.24"
1497 + ipv4_netmask = 24
1498 }
1499 }
1500 }
1501
1502 + disk {
1503 + attach = false
1504 + datastore_id = "<computed>"
1505 + device_address = (known after apply)
1506 + disk_mode = "persistent"
1507 + disk_sharing = "sharingNone"
1508 + eagerly_scrub = false
1509 + io_limit = -1
1510 + io_reservation = 0
1511 + io_share_count = 0
1512 + io_share_level = "normal"
1513 + keep_on_remove = false
1514 + key = 0
1515 + label = "disk0"
1516 + path = (known after apply)
1517 + size = 200
1518 + thin_provisioned = true
1519 + unit_number = 0
1520 + uuid = (known after apply)
1521 + write_through = false
1522 }
1523
1524 + network_interface {
1525 + adapter_type = "vmxnet3"
1526 + bandwidth_limit = -1
1527 + bandwidth_reservation = 0
1528 + bandwidth_share_count = (known after apply)
1529 + bandwidth_share_level = "normal"
1530 + device_address = (known after apply)
1531 + key = (known after apply)
1532 + mac_address = (known after apply)
1533 + network_id = "dvportgroup-1663"
1534 }
1535 }
1536
1537 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_c[0] will be created
1538 + resource "vsphere_virtual_machine" "vm_worker_c" {
1539 + boot_retry_delay = 10000
1540 + change_version = (known after apply)
1541 + cpu_limit = -1
1542 + cpu_share_count = (known after apply)
1543 + cpu_share_level = "normal"
1544 + datastore_id = "datastore-1035"
1545 + default_ip_address = (known after apply)
1546 + enable_disk_uuid = true
1547 + ept_rvi_mode = "automatic"
1548 + firmware = "bios"
1549 + folder = "P002"
1550 + force_power_off = true
1551 + guest_id = "rhel7_64Guest"
1552 + guest_ip_addresses = (known after apply)
1553 + host_system_id = (known after apply)
1554 + hv_mode = "hvAuto"
1555 + id = (known after apply)
1556 + imported = (known after apply)
1557 + latency_sensitivity = "normal"
1558 + memory = 16384
1559 + memory_limit = -1
1560 + memory_share_count = (known after apply)
1561 + memory_share_level = "normal"
1562 + migrate_wait_timeout = 30
1563 + moid = (known after apply)
1564 + name = "p002xknw200.nix.tech.altenar.net"
1565 + num_cores_per_socket = 2
1566 + num_cpus = 8
1567 + reboot_required = (known after apply)
1568 + resource_pool_id = "resgroup-1034"
1569 + run_tools_scripts_after_power_on = true
1570 + run_tools_scripts_after_resume = true
1571 + run_tools_scripts_before_guest_shutdown = true
1572 + run_tools_scripts_before_guest_standby = true
1573 + scsi_bus_sharing = "noSharing"
1574 + scsi_controller_count = 1
1575 + scsi_type = "pvscsi"
1576 + shutdown_wait_timeout = 3
1577 + swap_placement_policy = "inherit"
1578 + uuid = (known after apply)
1579 + vapp_transport = (known after apply)
1580 + vmware_tools_status = (known after apply)
1581 + vmx_path = (known after apply)
1582 + wait_for_guest_ip_timeout = 0
1583 + wait_for_guest_net_routable = true
1584 + wait_for_guest_net_timeout = 5
1585
1586 + clone {
1587 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1588 + timeout = 120
1589
1590 + customize {
1591 + dns_server_list = [
1592 + "10.202.8.160",
1593 + "10.202.8.161",
1594 ]
1595 + ipv4_gateway = "10.202.40.254"
1596 + timeout = 40
1597
1598 + linux_options {
1599 + domain = "nix.tech.altenar.net"
1600 + host_name = "p002xknw200"
1601 + hw_clock_utc = true
1602 }
1603
1604 + network_interface {
1605 + ipv4_address = "10.202.40.22"
1606 + ipv4_netmask = 24
1607 }
1608 }
1609 }
1610
1611 + disk {
1612 + attach = false
1613 + datastore_id = "<computed>"
1614 + device_address = (known after apply)
1615 + disk_mode = "persistent"
1616 + disk_sharing = "sharingNone"
1617 + eagerly_scrub = false
1618 + io_limit = -1
1619 + io_reservation = 0
1620 + io_share_count = 0
1621 + io_share_level = "normal"
1622 + keep_on_remove = false
1623 + key = 0
1624 + label = "disk0"
1625 + path = (known after apply)
1626 + size = 200
1627 + thin_provisioned = true
1628 + unit_number = 0
1629 + uuid = (known after apply)
1630 + write_through = false
1631 }
1632
1633 + network_interface {
1634 + adapter_type = "vmxnet3"
1635 + bandwidth_limit = -1
1636 + bandwidth_reservation = 0
1637 + bandwidth_share_count = (known after apply)
1638 + bandwidth_share_level = "normal"
1639 + device_address = (known after apply)
1640 + key = (known after apply)
1641 + mac_address = (known after apply)
1642 + network_id = "dvportgroup-1663"
1643 }
1644 }
1645
1646 # module.p002-sb2-k8s-nlam0.vsphere_virtual_machine.vm_worker_c[1] will be created
1647 + resource "vsphere_virtual_machine" "vm_worker_c" {
1648 + boot_retry_delay = 10000
1649 + change_version = (known after apply)
1650 + cpu_limit = -1
1651 + cpu_share_count = (known after apply)
1652 + cpu_share_level = "normal"
1653 + datastore_id = "datastore-1035"
1654 + default_ip_address = (known after apply)
1655 + enable_disk_uuid = true
1656 + ept_rvi_mode = "automatic"
1657 + firmware = "bios"
1658 + folder = "P002"
1659 + force_power_off = true
1660 + guest_id = "rhel7_64Guest"
1661 + guest_ip_addresses = (known after apply)
1662 + host_system_id = (known after apply)
1663 + hv_mode = "hvAuto"
1664 + id = (known after apply)
1665 + imported = (known after apply)
1666 + latency_sensitivity = "normal"
1667 + memory = 16384
1668 + memory_limit = -1
1669 + memory_share_count = (known after apply)
1670 + memory_share_level = "normal"
1671 + migrate_wait_timeout = 30
1672 + moid = (known after apply)
1673 + name = "p002xknw201.nix.tech.altenar.net"
1674 + num_cores_per_socket = 2
1675 + num_cpus = 8
1676 + reboot_required = (known after apply)
1677 + resource_pool_id = "resgroup-1034"
1678 + run_tools_scripts_after_power_on = true
1679 + run_tools_scripts_after_resume = true
1680 + run_tools_scripts_before_guest_shutdown = true
1681 + run_tools_scripts_before_guest_standby = true
1682 + scsi_bus_sharing = "noSharing"
1683 + scsi_controller_count = 1
1684 + scsi_type = "pvscsi"
1685 + shutdown_wait_timeout = 3
1686 + swap_placement_policy = "inherit"
1687 + uuid = (known after apply)
1688 + vapp_transport = (known after apply)
1689 + vmware_tools_status = (known after apply)
1690 + vmx_path = (known after apply)
1691 + wait_for_guest_ip_timeout = 0
1692 + wait_for_guest_net_routable = true
1693 + wait_for_guest_net_timeout = 5
1694
1695 + clone {
1696 + template_uuid = "4206e6d2-d35a-6cea-bd14-d70a02e54f5e"
1697 + timeout = 120
1698
1699 + customize {
1700 + dns_server_list = [
1701 + "10.202.8.160",
1702 + "10.202.8.161",
1703 ]
1704 + ipv4_gateway = "10.202.40.254"
1705 + timeout = 40
1706
1707 + linux_options {
1708 + domain = "nix.tech.altenar.net"
1709 + host_name = "p002xknw201"
1710 + hw_clock_utc = true
1711 }
1712
1713 + network_interface {
1714 + ipv4_address = "10.202.40.25"
1715 + ipv4_netmask = 24
1716 }
1717 }
1718 }
1719
1720 + disk {
1721 + attach = false
1722 + datastore_id = "<computed>"
1723 + device_address = (known after apply)
1724 + disk_mode = "persistent"
1725 + disk_sharing = "sharingNone"
1726 + eagerly_scrub = false
1727 + io_limit = -1
1728 + io_reservation = 0
1729 + io_share_count = 0
1730 + io_share_level = "normal"
1731 + keep_on_remove = false
1732 + key = 0
1733 + label = "disk0"
1734 + path = (known after apply)
1735 + size = 200
1736 + thin_provisioned = true
1737 + unit_number = 0
1738 + uuid = (known after apply)
1739 + write_through = false
1740 }
1741
1742 + network_interface {
1743 + adapter_type = "vmxnet3"
1744 + bandwidth_limit = -1
1745 + bandwidth_reservation = 0
1746 + bandwidth_share_count = (known after apply)
1747 + bandwidth_share_level = "normal"
1748 + device_address = (known after apply)
1749 + key = (known after apply)
1750 + mac_address = (known after apply)
1751 + network_id = "dvportgroup-1663"
1752 }
1753 }
1754
1755Plan: 31 to add, 0 to change, 0 to destroy.
1756
1757------------------------------------------------------------------------
1758
1759Note: You didn't specify an "-out" parameter to save this plan, so Terraform
1760can't guarantee that exactly these actions will be performed if
1761"terraform apply" is subsequently run.