# Exported Jul 10, 2020, 09:30 AM
1---
2# Source: calico/templates/calico-config.yaml
3# This ConfigMap is used to configure a self-hosted Calico installation.
4kind: ConfigMap
5apiVersion: v1
6metadata:
7 name: calico-config
8 namespace: kube-system
9 labels:
10 pipeline-managed: kube-system
11data:
12 # You must set a non-zero value for Typha replicas below.
13 typha_service_name: "calico-typha"
14 # Configure the backend to use.
15 calico_backend: "bird"
16
17 # Configure the MTU to use
18 veth_mtu: "1440"
19
20 # The CNI network configuration to install on each node. The special
21 # values in this config will be automatically populated.
22 cni_network_config: |-
23 {
24 "name": "k8s-pod-network",
25 "cniVersion": "0.3.1",
26 "plugins": [
27 {
28 "type": "calico",
29 "log_level": "info",
30 "datastore_type": "kubernetes",
31 "nodename": "__KUBERNETES_NODE_NAME__",
32 "mtu": __CNI_MTU__,
33 "ipam": {
34 "type": "host-local",
35 "subnet": "usePodCidr"
36 },
37 "policy": {
38 "type": "k8s"
39 },
40 "kubernetes": {
41 "kubeconfig": "__KUBECONFIG_FILEPATH__"
42 }
43 },
44 {
45 "type": "portmap",
46 "snat": true,
47 "capabilities": {"portMappings": true}
48 },
49 {
50 "type": "bandwidth",
51 "capabilities": {"bandwidth": true}
52 }
53 ]
54 }
55
56---
57# Source: calico/templates/kdd-crds.yaml
58apiVersion: apiextensions.k8s.io/v1beta1
59kind: CustomResourceDefinition
60metadata:
61 name: felixconfigurations.crd.projectcalico.org
62 labels:
63 pipeline-managed: kube-system
64spec:
65 scope: Cluster
66 group: crd.projectcalico.org
67 version: v1
68 names:
69 kind: FelixConfiguration
70 plural: felixconfigurations
71 singular: felixconfiguration
72---
73
74apiVersion: apiextensions.k8s.io/v1beta1
75kind: CustomResourceDefinition
76metadata:
77 name: ipamblocks.crd.projectcalico.org
78 labels:
79 pipeline-managed: kube-system
80spec:
81 scope: Cluster
82 group: crd.projectcalico.org
83 version: v1
84 names:
85 kind: IPAMBlock
86 plural: ipamblocks
87 singular: ipamblock
88
89---
90
91apiVersion: apiextensions.k8s.io/v1beta1
92kind: CustomResourceDefinition
93metadata:
94 name: blockaffinities.crd.projectcalico.org
95 labels:
96 pipeline-managed: kube-system
97spec:
98 scope: Cluster
99 group: crd.projectcalico.org
100 version: v1
101 names:
102 kind: BlockAffinity
103 plural: blockaffinities
104 singular: blockaffinity
105
106---
107
108apiVersion: apiextensions.k8s.io/v1beta1
109kind: CustomResourceDefinition
110metadata:
111 name: ipamhandles.crd.projectcalico.org
112 labels:
113 pipeline-managed: kube-system
114spec:
115 scope: Cluster
116 group: crd.projectcalico.org
117 version: v1
118 names:
119 kind: IPAMHandle
120 plural: ipamhandles
121 singular: ipamhandle
122
123---
124
125apiVersion: apiextensions.k8s.io/v1beta1
126kind: CustomResourceDefinition
127metadata:
128 name: ipamconfigs.crd.projectcalico.org
129 labels:
130 pipeline-managed: kube-system
131spec:
132 scope: Cluster
133 group: crd.projectcalico.org
134 version: v1
135 names:
136 kind: IPAMConfig
137 plural: ipamconfigs
138 singular: ipamconfig
139
140---
141
142apiVersion: apiextensions.k8s.io/v1beta1
143kind: CustomResourceDefinition
144metadata:
145 name: bgppeers.crd.projectcalico.org
146 labels:
147 pipeline-managed: kube-system
148spec:
149 scope: Cluster
150 group: crd.projectcalico.org
151 version: v1
152 names:
153 kind: BGPPeer
154 plural: bgppeers
155 singular: bgppeer
156
157---
158
159apiVersion: apiextensions.k8s.io/v1beta1
160kind: CustomResourceDefinition
161metadata:
162 name: bgpconfigurations.crd.projectcalico.org
163 labels:
164 pipeline-managed: kube-system
165spec:
166 scope: Cluster
167 group: crd.projectcalico.org
168 version: v1
169 names:
170 kind: BGPConfiguration
171 plural: bgpconfigurations
172 singular: bgpconfiguration
173
174---
175
176apiVersion: apiextensions.k8s.io/v1beta1
177kind: CustomResourceDefinition
178metadata:
179 name: ippools.crd.projectcalico.org
180 labels:
181 pipeline-managed: kube-system
182spec:
183 scope: Cluster
184 group: crd.projectcalico.org
185 version: v1
186 names:
187 kind: IPPool
188 plural: ippools
189 singular: ippool
190
191---
192
193apiVersion: apiextensions.k8s.io/v1beta1
194kind: CustomResourceDefinition
195metadata:
196 name: hostendpoints.crd.projectcalico.org
197 labels:
198 pipeline-managed: kube-system
199spec:
200 scope: Cluster
201 group: crd.projectcalico.org
202 version: v1
203 names:
204 kind: HostEndpoint
205 plural: hostendpoints
206 singular: hostendpoint
207
208---
209
210apiVersion: apiextensions.k8s.io/v1beta1
211kind: CustomResourceDefinition
212metadata:
213 name: clusterinformations.crd.projectcalico.org
214 labels:
215 pipeline-managed: kube-system
216spec:
217 scope: Cluster
218 group: crd.projectcalico.org
219 version: v1
220 names:
221 kind: ClusterInformation
222 plural: clusterinformations
223 singular: clusterinformation
224
225---
226
227apiVersion: apiextensions.k8s.io/v1beta1
228kind: CustomResourceDefinition
229metadata:
230 name: globalnetworkpolicies.crd.projectcalico.org
231 labels:
232 pipeline-managed: kube-system
233spec:
234 scope: Cluster
235 group: crd.projectcalico.org
236 version: v1
237 names:
238 kind: GlobalNetworkPolicy
239 plural: globalnetworkpolicies
240 singular: globalnetworkpolicy
241
242---
243
244apiVersion: apiextensions.k8s.io/v1beta1
245kind: CustomResourceDefinition
246metadata:
247 name: globalnetworksets.crd.projectcalico.org
248 labels:
249 pipeline-managed: kube-system
250spec:
251 scope: Cluster
252 group: crd.projectcalico.org
253 version: v1
254 names:
255 kind: GlobalNetworkSet
256 plural: globalnetworksets
257 singular: globalnetworkset
258
259---
260
261apiVersion: apiextensions.k8s.io/v1beta1
262kind: CustomResourceDefinition
263metadata:
264 name: networkpolicies.crd.projectcalico.org
265 labels:
266 pipeline-managed: kube-system
267spec:
268 scope: Namespaced
269 group: crd.projectcalico.org
270 version: v1
271 names:
272 kind: NetworkPolicy
273 plural: networkpolicies
274 singular: networkpolicy
275
276---
277
278apiVersion: apiextensions.k8s.io/v1beta1
279kind: CustomResourceDefinition
280metadata:
281 name: networksets.crd.projectcalico.org
282 labels:
283 pipeline-managed: kube-system
284spec:
285 scope: Namespaced
286 group: crd.projectcalico.org
287 version: v1
288 names:
289 kind: NetworkSet
290 plural: networksets
291 singular: networkset
292---
293# Source: calico/templates/rbac.yaml
294
295# Include a clusterrole for the kube-controllers component,
296# and bind it to the calico-kube-controllers serviceaccount.
297kind: ClusterRole
298apiVersion: rbac.authorization.k8s.io/v1
299metadata:
300 name: calico-kube-controllers
301 labels:
302 pipeline-managed: kube-system
303rules:
304 # Nodes are watched to monitor for deletions.
305 - apiGroups: [""]
306 resources:
307 - nodes
308 verbs:
309 - watch
310 - list
311 - get
312 # Pods are queried to check for existence.
313 - apiGroups: [""]
314 resources:
315 - pods
316 verbs:
317 - get
318 # IPAM resources are manipulated when nodes are deleted.
319 - apiGroups: ["crd.projectcalico.org"]
320 resources:
321 - ippools
322 verbs:
323 - list
324 - apiGroups: ["crd.projectcalico.org"]
325 resources:
326 - blockaffinities
327 - ipamblocks
328 - ipamhandles
329 verbs:
330 - get
331 - list
332 - create
333 - update
334 - delete
335 # Needs access to update clusterinformations.
336 - apiGroups: ["crd.projectcalico.org"]
337 resources:
338 - clusterinformations
339 verbs:
340 - get
341 - create
342 - update
343---
344kind: ClusterRoleBinding
345apiVersion: rbac.authorization.k8s.io/v1
346metadata:
347 name: calico-kube-controllers
348 labels:
349 pipeline-managed: kube-system
350roleRef:
351 apiGroup: rbac.authorization.k8s.io
352 kind: ClusterRole
353 name: calico-kube-controllers
354subjects:
355- kind: ServiceAccount
356 name: calico-kube-controllers
357 namespace: kube-system
358---
359# Include a clusterrole for the calico-node DaemonSet,
360# and bind it to the calico-node serviceaccount.
361kind: ClusterRole
362apiVersion: rbac.authorization.k8s.io/v1
363metadata:
364 name: calico-node
365 labels:
366 pipeline-managed: kube-system
367rules:
368 # The CNI plugin needs to get pods, nodes, and namespaces.
369 - apiGroups: [""]
370 resources:
371 - pods
372 - nodes
373 - namespaces
374 verbs:
375 - get
376 - apiGroups: [""]
377 resources:
378 - endpoints
379 - services
380 verbs:
381 # Used to discover service IPs for advertisement.
382 - watch
383 - list
384 # Used to discover Typhas.
385 - get
386 - apiGroups: [""]
387 resources:
388 - nodes/status
389 verbs:
390 # Needed for clearing NodeNetworkUnavailable flag.
391 - patch
392 # Calico stores some configuration information in node annotations.
393 - update
394 # Watch for changes to Kubernetes NetworkPolicies.
395 - apiGroups: ["networking.k8s.io"]
396 resources:
397 - networkpolicies
398 verbs:
399 - watch
400 - list
401 # Used by Calico for policy information.
402 - apiGroups: [""]
403 resources:
404 - pods
405 - namespaces
406 - serviceaccounts
407 verbs:
408 - list
409 - watch
410 # The CNI plugin patches pods/status.
411 - apiGroups: [""]
412 resources:
413 - pods/status
414 verbs:
415 - patch
416 # Calico monitors various CRDs for config.
417 - apiGroups: ["crd.projectcalico.org"]
418 resources:
419 - globalfelixconfigs
420 - felixconfigurations
421 - bgppeers
422 - globalbgpconfigs
423 - bgpconfigurations
424 - ippools
425 - ipamblocks
426 - globalnetworkpolicies
427 - globalnetworksets
428 - networkpolicies
429 - networksets
430 - clusterinformations
431 - hostendpoints
432 - blockaffinities
433 verbs:
434 - get
435 - list
436 - watch
437 # Calico must create and update some CRDs on startup.
438 - apiGroups: ["crd.projectcalico.org"]
439 resources:
440 - ippools
441 - felixconfigurations
442 - clusterinformations
443 verbs:
444 - create
445 - update
446 # Calico stores some configuration information on the node.
447 - apiGroups: [""]
448 resources:
449 - nodes
450 verbs:
451 - get
452 - list
453 - watch
454 # These permissions are only requried for upgrade from v2.6, and can
455 # be removed after upgrade or on fresh installations.
456 - apiGroups: ["crd.projectcalico.org"]
457 resources:
458 - bgpconfigurations
459 - bgppeers
460 verbs:
461 - create
462 - update
463 # These permissions are required for Calico CNI to perform IPAM allocations.
464 - apiGroups: ["crd.projectcalico.org"]
465 resources:
466 - blockaffinities
467 - ipamblocks
468 - ipamhandles
469 verbs:
470 - get
471 - list
472 - create
473 - update
474 - delete
475 - apiGroups: ["crd.projectcalico.org"]
476 resources:
477 - ipamconfigs
478 verbs:
479 - get
480 # Block affinities must also be watchable by confd for route aggregation.
481 - apiGroups: ["crd.projectcalico.org"]
482 resources:
483 - blockaffinities
484 verbs:
485 - watch
486 # The Calico IPAM migration needs to get daemonsets. These permissions can be
487 # removed if not upgrading from an installation using host-local IPAM.
488 - apiGroups: ["apps"]
489 resources:
490 - daemonsets
491 verbs:
492 - get
493---
494apiVersion: rbac.authorization.k8s.io/v1
495kind: ClusterRoleBinding
496metadata:
497 name: calico-node
498 labels:
499 pipeline-managed: kube-system
500roleRef:
501 apiGroup: rbac.authorization.k8s.io
502 kind: ClusterRole
503 name: calico-node
504subjects:
505- kind: ServiceAccount
506 name: calico-node
507 namespace: kube-system
508
509---
510# Source: calico/templates/calico-typha.yaml
511# This manifest creates a Service, which will be backed by Calico's Typha daemon.
512# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
513
514apiVersion: v1
515kind: Service
516metadata:
517 name: calico-typha
518 namespace: kube-system
519 labels:
520 pipeline-managed: kube-system
521 k8s-app: calico-typha
522spec:
523 ports:
524 - port: 5473
525 protocol: TCP
526 targetPort: calico-typha
527 name: calico-typha
528 selector:
529 k8s-app: calico-typha
530
531---
532
533# This manifest creates a Deployment of Typha to back the above service.
534
535apiVersion: apps/v1
536kind: Deployment
537metadata:
538 name: calico-typha
539 namespace: kube-system
540 labels:
541 pipeline-managed: kube-system
542 k8s-app: calico-typha
543spec:
544 # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
545 # typha_service_name variable in the calico-config ConfigMap above.
546 #
547 # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
548 # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
549 # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
550 replicas: 1
551 revisionHistoryLimit: 2
552 selector:
553 matchLabels:
554 k8s-app: calico-typha
555 template:
556 metadata:
557 labels:
558 k8s-app: calico-typha
559 annotations:
560 # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical
561 # add-on, ensuring it gets priority scheduling and that its resources are reserved
562 # if it ever gets evicted.
563 scheduler.alpha.kubernetes.io/critical-pod: ''
564 cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
565 prometheus.io/port: "9093"
566 prometheus.io/scrape: "true"
567 spec:
568 nodeSelector:
569 beta.kubernetes.io/os: linux
570 hostNetwork: true
571 tolerations:
572 # Mark the pod as a critical add-on for rescheduling.
573 - key: CriticalAddonsOnly
574 operator: Exists
575 # Since Calico can't network a pod until Typha is up, we need to run Typha itself
576 # as a host-networked pod.
577 serviceAccountName: calico-node
578 priorityClassName: system-cluster-critical
579 # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
580 securityContext:
581 fsGroup: 65534
582 containers:
583 - image: calico/typha:v3.11.1
584 name: calico-typha
585 ports:
586 - containerPort: 5473
587 name: calico-typha
588 protocol: TCP
589 env:
590 # Enable "info" logging by default. Can be set to "debug" to increase verbosity.
591 - name: TYPHA_LOGSEVERITYSCREEN
592 value: "info"
593 # Disable logging to file and syslog since those don't make sense in Kubernetes.
594 - name: TYPHA_LOGFILEPATH
595 value: "none"
596 - name: TYPHA_LOGSEVERITYSYS
597 value: "none"
598 # Monitor the Kubernetes API to find the number of running instances and rebalance
599 # connections.
600 - name: TYPHA_CONNECTIONREBALANCINGMODE
601 value: "kubernetes"
602 - name: TYPHA_DATASTORETYPE
603 value: "kubernetes"
604 - name: TYPHA_HEALTHENABLED
605 value: "true"
606 # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked,
607 # this opens a port on the host, which may need to be secured.
608 - name: TYPHA_PROMETHEUSMETRICSENABLED
609 value: "true"
610 - name: TYPHA_PROMETHEUSMETRICSPORT
611 value: "9093"
612 livenessProbe:
613 httpGet:
614 path: /liveness
615 port: 9098
616 host: localhost
617 periodSeconds: 30
618 initialDelaySeconds: 30
619 securityContext:
620 runAsNonRoot: true
621 allowPrivilegeEscalation: false
622 readinessProbe:
623 httpGet:
624 path: /readiness
625 port: 9098
626 host: localhost
627 periodSeconds: 10
628
629---
630
631# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
632
633apiVersion: policy/v1beta1
634kind: PodDisruptionBudget
635metadata:
636 name: calico-typha
637 namespace: kube-system
638 labels:
639 pipeline-managed: kube-system
640 k8s-app: calico-typha
641spec:
642 maxUnavailable: 1
643 selector:
644 matchLabels:
645 k8s-app: calico-typha
646---
647# Source: calico/templates/calico-node.yaml
648# This manifest installs the calico-node container, as well
649# as the CNI plugins and network config on
650# each master and worker node in a Kubernetes cluster.
651kind: DaemonSet
652apiVersion: apps/v1
653metadata:
654 name: calico-node
655 namespace: kube-system
656 labels:
657 pipeline-managed: kube-system
658 k8s-app: calico-node
659spec:
660 selector:
661 matchLabels:
662 k8s-app: calico-node
663 updateStrategy:
664 type: RollingUpdate
665 rollingUpdate:
666 maxUnavailable: 1
667 template:
668 metadata:
669 labels:
670 k8s-app: calico-node
671 annotations:
672 # This, along with the CriticalAddonsOnly toleration below,
673 # marks the pod as a critical add-on, ensuring it gets
674 # priority scheduling and that its resources are reserved
675 # if it ever gets evicted.
676 scheduler.alpha.kubernetes.io/critical-pod: ''
677 prometheus.io/port: "9091"
678 prometheus.io/scrape: "true"
679 spec:
680 nodeSelector:
681 beta.kubernetes.io/os: linux
682 hostNetwork: true
683 tolerations:
684 # Make sure calico-node gets scheduled on all nodes.
685 - effect: NoSchedule
686 operator: Exists
687 # Mark the pod as a critical add-on for rescheduling.
688 - key: CriticalAddonsOnly
689 operator: Exists
690 - effect: NoExecute
691 operator: Exists
692 serviceAccountName: calico-node
693 # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
694 # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
695 terminationGracePeriodSeconds: 0
696 priorityClassName: system-node-critical
697 initContainers:
698 # This container performs upgrade from host-local IPAM to calico-ipam.
699 # It can be deleted if this is a fresh installation, or if you have already
700 # upgraded to use calico-ipam.
701 - name: upgrade-ipam
702 image: calico/cni:v3.11.1
703 command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
704 env:
705 - name: KUBERNETES_NODE_NAME
706 valueFrom:
707 fieldRef:
708 fieldPath: spec.nodeName
709 - name: CALICO_NETWORKING_BACKEND
710 valueFrom:
711 configMapKeyRef:
712 name: calico-config
713 key: calico_backend
714 volumeMounts:
715 - mountPath: /var/lib/cni/networks
716 name: host-local-net-dir
717 - mountPath: /host/opt/cni/bin
718 name: cni-bin-dir
719 securityContext:
720 privileged: true
721 # This container installs the CNI binaries
722 # and CNI network config file on each node.
723 - name: install-cni
724 image: calico/cni:v3.11.1
725 command: ["/install-cni.sh"]
726 env:
727 # Name of the CNI config file to create.
728 - name: CNI_CONF_NAME
729 value: "10-calico.conflist"
730 # The CNI network config to install on each node.
731 - name: CNI_NETWORK_CONFIG
732 valueFrom:
733 configMapKeyRef:
734 name: calico-config
735 key: cni_network_config
736 # Set the hostname based on the k8s node name.
737 - name: KUBERNETES_NODE_NAME
738 valueFrom:
739 fieldRef:
740 fieldPath: spec.nodeName
741 # CNI MTU Config variable
742 - name: CNI_MTU
743 valueFrom:
744 configMapKeyRef:
745 name: calico-config
746 key: veth_mtu
747 # Prevents the container from sleeping forever.
748 - name: SLEEP
749 value: "false"
750 volumeMounts:
751 - mountPath: /host/opt/cni/bin
752 name: cni-bin-dir
753 - mountPath: /host/etc/cni/net.d
754 name: cni-net-dir
755 securityContext:
756 privileged: true
757 # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
758 # to communicate with Felix over the Policy Sync API.
759 - name: flexvol-driver
760 image: calico/pod2daemon-flexvol:v3.11.1
761 volumeMounts:
762 - name: flexvol-driver-host
763 mountPath: /host/driver
764 securityContext:
765 privileged: true
766 containers:
767 # Runs calico-node container on each Kubernetes node. This
768 # container programs network policy and routes on each
769 # host.
770 - name: calico-node
771 image: calico/node:v3.11.1
772 env:
773 # Use Kubernetes API as the backing datastore.
774 - name: DATASTORE_TYPE
775 value: "kubernetes"
776 # Typha support: controlled by the ConfigMap.
777 - name: FELIX_TYPHAK8SSERVICENAME
778 valueFrom:
779 configMapKeyRef:
780 name: calico-config
781 key: typha_service_name
782 # Wait for the datastore.
783 - name: WAIT_FOR_DATASTORE
784 value: "true"
785 # Set based on the k8s node name.
786 - name: NODENAME
787 valueFrom:
788 fieldRef:
789 fieldPath: spec.nodeName
790 # Choose the backend to use.
791 - name: CALICO_NETWORKING_BACKEND
792 valueFrom:
793 configMapKeyRef:
794 name: calico-config
795 key: calico_backend
796 # Cluster type to identify the deployment type
797 - name: CLUSTER_TYPE
798 value: "k8s,bgp"
799 # Auto-detect the BGP IP address.
800 - name: IP
801 value: "autodetect"
802 # Enable IPIP
803 - name: CALICO_IPV4POOL_IPIP
804 value: "Always"
805 # Set MTU for tunnel device used if ipip is enabled
806 - name: FELIX_IPINIPMTU
807 valueFrom:
808 configMapKeyRef:
809 name: calico-config
810 key: veth_mtu
811 # The default IPv4 pool to create on startup if none exists. Pod IPs will be
812 # chosen from this range. Changing this value after installation will have
813 # no effect. This should fall within `--cluster-cidr`.
814 - name: CALICO_IPV4POOL_CIDR
815 value: "100.192.0.0/11"
816 # Disable file logging so `kubectl logs` works.
817 - name: CALICO_DISABLE_FILE_LOGGING
818 value: "true"
819 # Set Felix endpoint to host default action to ACCEPT.
820 - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
821 value: "ACCEPT"
822 # Disable IPv6 on Kubernetes.
823 - name: FELIX_IPV6SUPPORT
824 value: "false"
825 # Set Felix logging to "info"
826 - name: FELIX_LOGSEVERITYSCREEN
827 value: "info"
828 - name: FELIX_HEALTHENABLED
829 value: "true"
830 # check this
831 - name: FELIX_PROMETHEUSMETRICSENABLED
832 value: "true"
833 securityContext:
834 privileged: true
835 resources:
836 requests:
837 cpu: 250m
838 livenessProbe:
839 exec:
840 command:
841 - /bin/calico-node
842 - -felix-live
843 - -bird-live
844 periodSeconds: 10
845 initialDelaySeconds: 10
846 failureThreshold: 6
847 readinessProbe:
848 exec:
849 command:
850 - /bin/calico-node
851 - -felix-ready
852 - -bird-ready
853 periodSeconds: 10
854 volumeMounts:
855 - mountPath: /lib/modules
856 name: lib-modules
857 readOnly: true
858 - mountPath: /run/xtables.lock
859 name: xtables-lock
860 readOnly: false
861 - mountPath: /var/run/calico
862 name: var-run-calico
863 readOnly: false
864 - mountPath: /var/lib/calico
865 name: var-lib-calico
866 readOnly: false
867 - name: policysync
868 mountPath: /var/run/nodeagent
869 volumes:
870 # Used by calico-node.
871 - name: lib-modules
872 hostPath:
873 path: /lib/modules
874 - name: var-run-calico
875 hostPath:
876 path: /var/run/calico
877 - name: var-lib-calico
878 hostPath:
879 path: /var/lib/calico
880 - name: xtables-lock
881 hostPath:
882 path: /run/xtables.lock
883 type: FileOrCreate
884 # Used to install CNI.
885 - name: cni-bin-dir
886 hostPath:
887 path: /opt/cni/bin
888 - name: cni-net-dir
889 hostPath:
890 path: /etc/cni/net.d
891 # Mount in the directory for host-local IPAM allocations. This is
892 # used when upgrading from host-local to calico-ipam, and can be removed
893 # if not using the upgrade-ipam init container.
894 - name: host-local-net-dir
895 hostPath:
896 path: /var/lib/cni/networks
897 # Used to create per-pod Unix Domain Sockets
898 - name: policysync
899 hostPath:
900 type: DirectoryOrCreate
901 path: /var/run/nodeagent
902 # Used to install Flex Volume Driver
903 - name: flexvol-driver-host
904 hostPath:
905 type: DirectoryOrCreate
906 path: /var/lib/kubelet/volumeplugins/nodeagent~uds
907---
908
909apiVersion: v1
910kind: ServiceAccount
911metadata:
912 name: calico-node
913 labels:
914 pipeline-managed: kube-system
915 namespace: kube-system
916
917---
918# Source: calico/templates/calico-kube-controllers.yaml
919
920# See https://github.com/projectcalico/kube-controllers
921apiVersion: apps/v1
922kind: Deployment
923metadata:
924 name: calico-kube-controllers
925 namespace: kube-system
926 labels:
927 pipeline-managed: kube-system
928 k8s-app: calico-kube-controllers
929spec:
930 # The controllers can only have a single active instance.
931 replicas: 1
932 selector:
933 matchLabels:
934 k8s-app: calico-kube-controllers
935 strategy:
936 type: Recreate
937 template:
938 metadata:
939 name: calico-kube-controllers
940 namespace: kube-system
941 labels:
942 k8s-app: calico-kube-controllers
943 annotations:
944 scheduler.alpha.kubernetes.io/critical-pod: ''
945 spec:
946 nodeSelector:
947 beta.kubernetes.io/os: linux
948 tolerations:
949 # Mark the pod as a critical add-on for rescheduling.
950 - key: CriticalAddonsOnly
951 operator: Exists
952 - key: node-role.kubernetes.io/master
953 effect: NoSchedule
954 serviceAccountName: calico-kube-controllers
955 priorityClassName: system-cluster-critical
956 containers:
957 - name: calico-kube-controllers
958 image: calico/kube-controllers:v3.11.1
959 env:
960 # Choose which controllers to run.
961 - name: ENABLED_CONTROLLERS
962 value: node
963 - name: DATASTORE_TYPE
964 value: kubernetes
965 readinessProbe:
966 exec:
967 command:
968 - /usr/bin/check-status
969 - -r
970
971---
972
973apiVersion: v1
974kind: ServiceAccount
975metadata:
976 name: calico-kube-controllers
977 labels:
978 pipeline-managed: kube-system
979 namespace: kube-system
980---
981# Source: calico/templates/calico-etcd-secrets.yaml
982
983---
984# Source: calico/templates/configure-canal.yaml