###IMPORTANT###
#If you are using containerd, make sure Docker isn't installed.
#kubeadm init will try to auto-detect the container runtime, and at the moment,
#if both are installed, it will pick Docker first.
ssh aen@c1-cp1


#0 - Creating a Cluster
#Create our kubernetes cluster, specify a pod network range matching that in calico.yaml!
#Only on the Control Plane Node, download the yaml file for the pod network.
wget https://raw.githubusercontent.com/projectcalico/calico/master/manifests/calico.yaml


#Look inside calico.yaml and find the setting for the Pod network IP address range, CALICO_IPV4POOL_CIDR.
#Adjust it if needed to ensure that the Pod network IP range doesn't overlap
#with other networks in your infrastructure.
vi calico.yaml
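
#For reference, the pool setting in calico.yaml looks like the excerpt below; it ships
#commented out, in which case Calico defaults the pool to 192.168.0.0/16.
#            - name: CALICO_IPV4POOL_CIDR
#              value: "192.168.0.0/16"

#A quick way to locate the setting without opening the editor:
grep -n -A1 CALICO_IPV4POOL_CIDR calico.yaml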


#You can now just use kubeadm init to bootstrap the cluster
sudo kubeadm init --kubernetes-version v1.26.0

#sudo kubeadm init #remove the kubernetes-version parameter if you want to use the latest.
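
#If you changed CALICO_IPV4POOL_CIDR in calico.yaml, you can also tell kubeadm about the
#range; shown here with Calico's default CIDR as a placeholder:
#sudo kubeadm init --kubernetes-version v1.26.0 --pod-network-cidr=192.168.0.0/16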


#Before moving on, review the output of the cluster creation process, including the
#kubeadm init phases, the admin.conf setup, and the node join command.


#Configure our account on the Control Plane Node to have admin access to the API server from a non-privileged account.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
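
#Quick sanity check that the new kubeconfig works from this non-privileged account:
kubectl cluster-info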


#1 - Creating a Pod Network
#Deploy the yaml file for your pod network.
kubectl apply -f calico.yaml


#Look for all the system pods and calico pods to change to Running.
#The DNS pods won't start (Pending) until the Pod network is deployed and Running.
kubectl get pods --all-namespaces


#Gives you output over time, rather than repainting the screen on each iteration.
kubectl get pods --all-namespaces --watch


#All system pods should be Running
kubectl get pods --all-namespaces


#Get a list of our current nodes; just the Control Plane Node...should be Ready.
kubectl get nodes




#2 - systemd Units...again!
#Check out the systemd unit...it's no longer crashlooping because it has static pods to start.
#Remember, the kubelet starts the static pods, and thus the control plane pods.
sudo systemctl status kubelet.service
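
#If the kubelet is misbehaving, its logs are in the journal; a quick way to tail them:
sudo journalctl -u kubelet.service --no-pager | tail -n 25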


#3 - Static Pod manifests
#Let's check out the static pod manifests on the Control Plane Node
ls /etc/kubernetes/manifests


#And look more closely at the etcd and API server manifests.
sudo more /etc/kubernetes/manifests/etcd.yaml
sudo more /etc/kubernetes/manifests/kube-apiserver.yaml
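
#The kubelet picks this directory up from staticPodPath in its config file; this grep
#assumes the default kubeadm location for that file:
sudo grep staticPodPath /var/lib/kubelet/config.yaml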


#Check out the directory where the kubeconfig files live for each of the control plane pods.
ls /etc/kubernetes
***************************************************************************



#For this demo, ssh into c1-node1
ssh aen@c1-node1


#Disable swap: run swapoff, then edit your fstab, removing any entry for swap partitions.
#You can recover the space with fdisk. You may want to reboot to ensure your config is ok.
sudo swapoff -a
sudo vi /etc/fstab
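
#If you'd rather not hand-edit, this sed sketch comments out any fstab line containing a
#swap entry (GNU sed pattern; review the file afterwards to be sure it caught the right lines):
sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab

#Swap should now report 0B:
free -h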


###IMPORTANT###
#I expect this code to change a bit to make the installation process more streamlined.
#Overall, the end result will stay the same...you'll have containerd installed.
#I will keep the code in the course downloads up to date with the latest method.
################

#0 - Joining Nodes to a Cluster

#Install a container runtime - containerd
#containerd prerequisites: load two modules and configure them to load on boot
#https://kubernetes.io/docs/setup/production-environment/container-runtimes/
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter
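
#Verify both modules are loaded:
lsmod | grep -e overlay -e br_netfilter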

# sysctl params required by setup; params persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF

# Apply sysctl params without reboot
sudo sysctl --system
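
#Confirm the values took effect:
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward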


#Install containerd...we need to install from the Docker repo to get containerd 1.6; the Ubuntu repo stops at 1.5.9.
#Make sure the keyring directory exists first (it may not on older releases).
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg

echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null

sudo apt-get update
sudo apt-get install -y containerd.io


#Configure containerd
sudo mkdir -p /etc/containerd
sudo containerd config default | sudo tee /etc/containerd/config.toml


#Set the cgroup driver for containerd to systemd, which is required by the kubelet.
#For more information on this config file see:
# https://github.com/containerd/cri/blob/master/docs/config.md and also
# https://github.com/containerd/containerd/blob/master/docs/ops.md

#At the end of this section, change SystemdCgroup = false to SystemdCgroup = true:
#    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
#      ...
#      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
#        SystemdCgroup = true

#You can use sed to swap in true
sudo sed -i 's/ SystemdCgroup = false/ SystemdCgroup = true/' /etc/containerd/config.toml


#Verify the change was made
sudo vi /etc/containerd/config.toml
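
#Or, a non-interactive check instead of opening vi:
sudo grep -n 'SystemdCgroup' /etc/containerd/config.toml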


#Restart containerd with the new configuration
sudo systemctl restart containerd
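
#Quick check that containerd came back up cleanly; ctr ships with the containerd.io package.
sudo systemctl is-active containerd
sudo ctr version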



#Install Kubernetes packages - kubeadm, kubelet and kubectl
#Add Google's apt repository gpg key
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg


#Add the Kubernetes apt repository
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list


#Update the package list and see which versions are available
sudo apt-get update
apt-cache policy kubelet | head -n 20


#Install the required packages; if needed, we can request a specific version.
#Pick the same version you used on the Control Plane Node in 0-PackageInstallation-containerd.sh
VERSION=1.26.0-00
sudo apt-get install -y kubelet=$VERSION kubeadm=$VERSION kubectl=$VERSION
sudo apt-mark hold kubelet kubeadm kubectl containerd
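
#Confirm the holds are in place so these packages won't be upgraded behind your back:
apt-mark showhold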


#To install the latest, omit the version parameters
#sudo apt-get install kubelet kubeadm kubectl
#sudo apt-mark hold kubelet kubeadm kubectl


#Check the status of our kubelet and our container runtime.
#The kubelet will enter a crashloop until the node is joined to the cluster.
sudo systemctl status kubelet.service
sudo systemctl status containerd.service


#Ensure both are set to start when the system starts up.
sudo systemctl enable kubelet.service
sudo systemctl enable containerd.service


#Log out of c1-node1 and back on to c1-cp1
exit


#On c1-cp1, if you didn't keep the output from kubeadm init, you can list the bootstrap tokens.
sudo kubeadm token list


#If you need to generate a new token, perhaps because the old one timed out/expired.
sudo kubeadm token create


#On the Control Plane Node, you can find the CA cert hash.
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'


#You can also use print-join-command to generate a token and print the join command in the proper format.
#COPY THIS INTO YOUR CLIPBOARD
sudo kubeadm token create --print-join-command


#Back on the worker node c1-node1, let's join this Node to our cluster using the Control Plane
#Node (API Server) IP address or name, the token, and the cert hash.
ssh aen@c1-node1


#PASTE_JOIN_COMMAND_HERE...be sure to add sudo
sudo kubeadm join 172.16.94.10:6443 \
    --token 2yij0q.256jwadksuvxprp8 \
    --discovery-token-ca-cert-hash sha256:bd0763f650e65bc211c02f39d6e1e6a5ea92423728df7034b8747dc0086d6c8a


#Log out of c1-node1 and back on to c1-cp1
exit


#Back on the Control Plane Node, the new node will show NotReady until the networking pod is created on it.
#The scheduler has to place the pod, then the node has to pull the container images.
kubectl get nodes


#On the Control Plane Node, watch for the calico pod and kube-proxy to change to Running on the newly added node.
kubectl get pods --all-namespaces --watch


#Still on the Control Plane Node, look for this added node's status to become Ready.
kubectl get nodes


#GO BACK TO THE TOP AND DO THE SAME FOR c1-node2 and c1-node3
#Just SSH into c1-node2 and c1-node3 and run the commands again.
ssh aen@c1-node2
#You can skip the token re-creation if you have one that's still valid.