· 6 years ago · Apr 26, 2020, 11:14 AM
1toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ ./cleanup.sh
2• Deleting virtual machines
3INFO[0000] Machine firekube-master-0 hasn't been created...
4INFO[0000] Machine firekube-worker-0 hasn't been created...
5
6toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ rm ~/.wks/bin/wksctl
7
8toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ docker volume ls
9DRIVER VOLUME NAME
10local 8e4e82efcec91085a20a67b771068c694b976bfcdbc0d2dd306d49ebffc68868
11local minifk-0
12local minifk-1
13
14toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ docker volume rm minifk-0
15minifk-0
16
17toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ docker volume rm minifk-1
18minifk-1
19
20toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ docker volume ls
21DRIVER VOLUME NAME
22local 8e4e82efcec91085a20a67b771068c694b976bfcdbc0d2dd306d49ebffc68868
23
24toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ ./setup.sh
25• Using git branch: master
26• Using git remote: origin
27
28• Found jk 0.3.0
29• Found footloose 0.6.3
30• Downloading https://github.com/weaveworks/fk-covid/releases/download/v0.1.0/wksctl-linux.tar.gz
31##################################################################################################################################################################################################### 100.0%##################################################################################################################################################################################################### 100.0%
32wksctl
33INFO[0000] wksctl version 0.8.2-beta.1 is available; please update at https://github.com/weaveworks/wksctl/releases/tag/v0.8.2-beta.1
34• Found wksctl 0.8.2-alpha.3-60-g7fbcae5
35• Creating footloose manifest
36• Creating virtual machines
37INFO[0000] Docker Image: chanwit/minifk-master present locally
38INFO[0000] Docker Image: chanwit/minifk-worker present locally
39INFO[0000] Creating machine: firekube-master-0 ...
40INFO[0000] Creating machine: firekube-worker-0 ...
41• Creating Cluster API manifests
42• Generating cluster.yaml
43• Updating container images and git parameters
44INFO[0000] wksctl version 0.8.2-beta.1 is available; please update at https://github.com/weaveworks/wksctl/releases/tag/v0.8.2-beta.1
45• Pushing initial cluster configuration
46On branch master
47Your branch is up to date with 'origin/master'.
48
49Changes not staged for commit:
50 modified: setup.sh
51
52no changes added to commit
53Everything up-to-date
54• Installing Kubernetes cluster
55INFO[0000] wksctl version 0.8.2-beta.1 is available; please update at https://github.com/weaveworks/wksctl/releases/tag/v0.8.2-beta.1
56DEBU[2020-04-26T18:00:23+07:00] creating SSH client host=127.0.0.1 port=2222 printOutputs=true privateKeyPath=./cluster-key user=root
57DEBU[2020-04-26T18:00:23+07:00] running command: cat /etc/*release
58CentOS Linux release 7.6.1810 (Core)
59NAME="CentOS Linux"
60VERSION="7 (Core)"
61ID="centos"
62ID_LIKE="rhel fedora"
63VERSION_ID="7"
64PRETTY_NAME="CentOS Linux 7 (Core)"
65ANSI_COLOR="0;31"
66CPE_NAME="cpe:/o:centos:centos:7"
67HOME_URL="https://www.centos.org/"
68BUG_REPORT_URL="https://bugs.centos.org/"
69
70CENTOS_MANTISBT_PROJECT="CentOS-7"
71CENTOS_MANTISBT_PROJECT_VERSION="7"
72REDHAT_SUPPORT_PRODUCT="centos"
73REDHAT_SUPPORT_PRODUCT_VERSION="7"
74
75CentOS Linux release 7.6.1810 (Core)
76CentOS Linux release 7.6.1810 (Core)
77DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'cat /etc/os-release'
78NAME="CentOS Linux"
79VERSION="7 (Core)"
80ID="centos"
81ID_LIKE="rhel fedora"
82VERSION_ID="7"
83PRETTY_NAME="CentOS Linux 7 (Core)"
84ANSI_COLOR="0;31"
85CPE_NAME="cpe:/o:centos:centos:7"
86HOME_URL="https://www.centos.org/"
87BUG_REPORT_URL="https://bugs.centos.org/"
88
89CENTOS_MANTISBT_PROJECT="CentOS-7"
90CENTOS_MANTISBT_PROJECT_VERSION="7"
91REDHAT_SUPPORT_PRODUCT="centos"
92REDHAT_SUPPORT_PRODUCT_VERSION="7"
93
94DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'cat /etc/machine-id 2>/dev/null || cat /var/lib/dbus/machine-id 2>/dev/null'
95de7eb7a3e12a4f358cde67ca0e1882aa
96DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'cat /sys/class/dmi/id/product_uuid 2>/dev/null || cat /etc/machine-id 2>/dev/null'
9703aa02fc-0414-0587-ff06-d40700080009
98DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'command -v -- "selinuxenabled" >/dev/null 2>&1'
99DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'selinuxenabled'
100DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'cat /proc/1/environ'
101PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binHOSTNAME=master-0TERM=xtermcontainer=dockerHOME=/rootDEBU[2020-04-26T18:00:23+07:00] the following env-specific configuration will be used config="&{0 false [FileContent--proc-sys-net-bridge-bridge-nf-call-iptables Swap SystemVerification] true false false true weavek8sops }"
102DEBU[2020-04-26T18:00:23+07:00] Kubernetes version used machine=master-0 version=1.14.1
103DEBU[2020-04-26T18:00:23+07:00] building addon addon=weave-net
104INFO[2020-04-26T18:00:23+07:00] Starting resource="install:base"
105DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' yum-plugin-versionlock'
106yum-plugin-versionlock 1.1.31 52.el7
107DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' device-mapper-persistent-data'
108device-mapper-persistent-data 0.8.5 1.el7
109DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' lvm2'
110lvm2 2.02.185 2.el7_7.2
111DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' yum-utils'
112yum-utils 1.1.31 50.el7
113INFO[2020-04-26T18:00:23+07:00] Starting resource="install:yum-versionlock"
114INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:yum-versionlock"
115INFO[2020-04-26T18:00:23+07:00] Starting resource="install:device-mapper-persistent-data"
116INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:device-mapper-persistent-data"
117INFO[2020-04-26T18:00:23+07:00] Starting resource="install:lvm2"
118INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:lvm2"
119INFO[2020-04-26T18:00:23+07:00] Starting resource="install:yum-utils"
120INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:yum-utils"
121INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:base"
122INFO[2020-04-26T18:00:23+07:00] Starting resource="install:config"
123DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'md5sum /etc/yum.repos.d/kubernetes.repo'
1245cd36504dc7b6ba86442d9634ab4c2e5 /etc/yum.repos.d/kubernetes.repo
125DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'md5sum /etc/yum.repos.d/docker-ce.repo'
126bbb0224eb355f307b39eed429c61be09 /etc/yum.repos.d/docker-ce.repo
127DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'md5sum /etc/docker/daemon.json'
1286b7b0989399dda789e176584eac29e0f /etc/docker/daemon.json
129INFO[2020-04-26T18:00:23+07:00] Starting resource="install:config-file-0"
130INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:config-file-0"
131INFO[2020-04-26T18:00:23+07:00] Starting resource="install:config-file-1"
132INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:config-file-1"
133INFO[2020-04-26T18:00:23+07:00] Starting resource="install:config-file-2"
134INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:config-file-2"
135INFO[2020-04-26T18:00:23+07:00] Finishing resource="install:config"
136INFO[2020-04-26T18:00:23+07:00] Starting resource="install:cri"
137DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'systemctl show docker -p ActiveState'
138ActiveState=active
139DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'systemctl is-enabled docker'
140enabled
141DEBU[2020-04-26T18:00:23+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' docker-ce-18.09.7'
142docker-ce 18.09.7 3.el7
143INFO[2020-04-26T18:00:24+07:00] Starting resource="install:docker"
144INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:docker"
145INFO[2020-04-26T18:00:24+07:00] Starting resource="systemd:daemon-reload"
146DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'systemctl daemon-reload'
147INFO[2020-04-26T18:00:24+07:00] Finishing resource="systemd:daemon-reload"
148INFO[2020-04-26T18:00:24+07:00] Starting resource="lock-package:docker"
149DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'yum versionlock add docker-ce'
150Loaded plugins: fastestmirror, ovl, versionlock
151versionlock added: 0
152INFO[2020-04-26T18:00:24+07:00] Finishing resource="lock-package:docker"
153INFO[2020-04-26T18:00:24+07:00] Starting resource="service-init:docker-service"
154INFO[2020-04-26T18:00:24+07:00] Finishing resource="service-init:docker-service"
155INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:cri"
156INFO[2020-04-26T18:00:24+07:00] Starting resource="install:k8s"
157DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' kubectl-1.14.1'
158kubectl 1.14.1 0
159DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' kubeadm-1.14.1'
160kubeadm 1.14.1 0
161DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'md5sum /etc/sysconfig/kubelet'
162da79170ce18fd664049c9e6ded5f9f2f /etc/sysconfig/kubelet
163DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'systemctl show kubelet -p ActiveState'
164ActiveState=activating
165DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'systemctl is-enabled kubelet'
166enabled
167DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'rpm -q --queryformat '"'"'%{NAME} %{VERSION} %{RELEASE}\n'"'"' kubelet-1.14.1'
168kubelet 1.14.1 0
169INFO[2020-04-26T18:00:24+07:00] Starting resource="install:kubectl"
170INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:kubectl"
171INFO[2020-04-26T18:00:24+07:00] Starting resource="install:kubelet"
172INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:kubelet"
173INFO[2020-04-26T18:00:24+07:00] Starting resource="create-dir:kubelet.service.d"
174DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'mkdir -p /etc/systemd/system/kubelet.service.d'
175INFO[2020-04-26T18:00:24+07:00] Finishing resource="create-dir:kubelet.service.d"
176INFO[2020-04-26T18:00:24+07:00] Starting resource="install:kubeadm"
177INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:kubeadm"
178INFO[2020-04-26T18:00:24+07:00] Starting resource="configure:kubelet-sysconfig"
179DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'mkdir -pv $(dirname "/etc/sysconfig/kubelet") && sed -n '"'"'w /etc/sysconfig/kubelet'"'"' && chmod 0660 "/etc/sysconfig/kubelet"'
180INFO[2020-04-26T18:00:24+07:00] Finishing resource="configure:kubelet-sysconfig"
181INFO[2020-04-26T18:00:24+07:00] Starting resource="lock-package:kubernetes"
182DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'yum versionlock add '"'"'kube*'"'"''
183Loaded plugins: fastestmirror, ovl, versionlock
184versionlock added: 0
185INFO[2020-04-26T18:00:24+07:00] Finishing resource="lock-package:kubernetes"
186INFO[2020-04-26T18:00:24+07:00] Starting resource="service-init:kubelet"
187INFO[2020-04-26T18:00:24+07:00] Finishing resource="service-init:kubelet"
188INFO[2020-04-26T18:00:24+07:00] Finishing resource="install:k8s"
189INFO[2020-04-26T18:00:24+07:00] Starting resource="kubeadm:init"
190INFO[2020-04-26T18:00:24+07:00] initializing Kubernetes cluster
191DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'mkdir -pv $(dirname "/tmp/wks_kubeadm_init_config.yaml") && sed -n '"'"'w /tmp/wks_kubeadm_init_config.yaml'"'"' && chmod 0660 "/tmp/wks_kubeadm_init_config.yaml"'
192DEBU[2020-04-26T18:00:24+07:00] uploaded kubeadm's configuration yaml="apiServer:\n certSANs:\n - localhost\n - 127.0.0.1\n - 172.17.0.2\napiVersion: kubeadm.k8s.io/v1beta1\ncertificatesDir: \"\"\ncontrolPlaneEndpoint: 172.17.0.2:6443\ncontrollerManager: {}\ndns:\n type: \"\"\netcd: {}\nimageRepository: \"\"\nkind: ClusterConfiguration\nkubernetesVersion: 1.14.1\nnetworking:\n dnsDomain: \"\"\n podSubnet: \"\"\n serviceSubnet: \"\"\nscheduler: {}\n---\napiVersion: kubeadm.k8s.io/v1beta1\nbootstrapTokens:\n- token: o0xa3h.acd2jfuqmqr6m6qm\nkind: InitConfiguration\nlocalAPIEndpoint:\n advertiseAddress: 172.17.0.2\n bindPort: 0\nnodeRegistration:\n kubeletExtraArgs:\n node-ip: 172.17.0.2\n---\napiVersion: kubeproxy.config.k8s.io/v1alpha1\nbindAddress: \"\"\nclientConnection:\n acceptContentTypes: \"\"\n burst: 0\n contentType: \"\"\n kubeconfig: \"\"\n qps: 0\nclusterCIDR: \"\"\nconfigSyncPeriod: 0s\nconntrack:\n max: 0\n maxPerCore: null\n min: null\n tcpCloseWaitTimeout: null\n tcpEstablishedTimeout: null\nenableProfiling: false\nhealthzBindAddress: \"\"\nhostnameOverride: \"\"\niptables:\n masqueradeAll: false\n masqueradeBit: null\n minSyncPeriod: 0s\n syncPeriod: 0s\nipvs:\n excludeCIDRs: null\n minSyncPeriod: 0s\n scheduler: \"\"\n syncPeriod: 0s\nkind: KubeProxyConfiguration\nmetricsBindAddress: \"\"\nmode: \"\"\nnodePortAddresses: null\noomScoreAdj: null\nportRange: \"\"\nresourceContainer: \"\"\nudpIdleTimeout: 0s\n"
193INFO[2020-04-26T18:00:24+07:00] Starting resource="kubeadm:get-homedir"
194DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'echo -n $HOME'
195/rootINFO[2020-04-26T18:00:24+07:00] Finishing resource="kubeadm:get-homedir"
196INFO[2020-04-26T18:00:24+07:00] Starting resource="kubeadm:reset"
197DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'echo skipping :: kubeadm reset --force'
198skipping :: kubeadm reset --force
199INFO[2020-04-26T18:00:24+07:00] Finishing resource="kubeadm:reset"
200INFO[2020-04-26T18:00:24+07:00] Starting resource="kubeadm:config:kubectl-dir"
201DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'mkdir -p /root/.kube'
202INFO[2020-04-26T18:00:24+07:00] Finishing resource="kubeadm:config:kubectl-dir"
203INFO[2020-04-26T18:00:24+07:00] Starting resource="kubeadm:config:images"
204DEBU[2020-04-26T18:00:24+07:00] running command: sudo -n -- sh -c 'echo skipping :: kubeadm config images pull --config=/tmp/wks_kubeadm_init_config.yaml'
205skipping :: kubeadm config images pull --config=/tmp/wks_kubeadm_init_config.yaml
206INFO[2020-04-26T18:00:25+07:00] Finishing resource="kubeadm:config:images"
207INFO[2020-04-26T18:00:25+07:00] Starting resource="kubeadm:run-init"
208DEBU[2020-04-26T18:00:25+07:00] running command: sudo -n -- sh -c '( unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY && ( kubeadm init --config=/tmp/wks_kubeadm_init_config.yaml --ignore-preflight-errors=all --experimental-upload-certs ) )'
209[init] Using Kubernetes version: v1.14.1
210[preflight] Running pre-flight checks
211 [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
212 [WARNING Swap]: running with swap on is not supported. Please disable swap
213 [WARNING SystemVerification]: failed to parse kernel config: unable to load kernel module: "configs", output: "", err: exit status 1
214[preflight] The system verification failed. Printing the output from the verification:
215KERNEL_VERSION: 5.3.0-46-generic
216DOCKER_VERSION: 18.09.7
217DOCKER_GRAPH_DRIVER: overlay2
218OS: Linux
219CGROUPS_CPU: enabled
220CGROUPS_CPUACCT: enabled
221CGROUPS_CPUSET: enabled
222CGROUPS_DEVICES: enabled
223CGROUPS_FREEZER: enabled
224CGROUPS_MEMORY: enabled
225[preflight] Pulling images required for setting up a Kubernetes cluster
226[preflight] This might take a minute or two, depending on the speed of your internet connection
227[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
228[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
229[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
230[kubelet-start] Activating the kubelet service
231[certs] Using certificateDir folder "/etc/kubernetes/pki"
232[certs] Generating "ca" certificate and key
233[certs] Generating "apiserver" certificate and key
234[certs] apiserver serving cert is signed for DNS names [master-0 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local localhost] and IPs [10.96.0.1 172.17.0.2 172.17.0.2 127.0.0.1 172.17.0.2]
235[certs] Generating "apiserver-kubelet-client" certificate and key
236[certs] Generating "front-proxy-ca" certificate and key
237[certs] Generating "front-proxy-client" certificate and key
238[certs] Generating "etcd/ca" certificate and key
239[certs] Generating "etcd/server" certificate and key
240[certs] etcd/server serving cert is signed for DNS names [master-0 localhost] and IPs [172.17.0.2 127.0.0.1 ::1]
241[certs] Generating "etcd/peer" certificate and key
242[certs] etcd/peer serving cert is signed for DNS names [master-0 localhost] and IPs [172.17.0.2 127.0.0.1 ::1]
243[certs] Generating "etcd/healthcheck-client" certificate and key
244[certs] Generating "apiserver-etcd-client" certificate and key
245[certs] Generating "sa" key and public key
246[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
247[kubeconfig] Writing "admin.conf" kubeconfig file
248[kubeconfig] Writing "kubelet.conf" kubeconfig file
249[kubeconfig] Writing "controller-manager.conf" kubeconfig file
250[kubeconfig] Writing "scheduler.conf" kubeconfig file
251[control-plane] Using manifest folder "/etc/kubernetes/manifests"
252[control-plane] Creating static Pod manifest for "kube-apiserver"
253[control-plane] Creating static Pod manifest for "kube-controller-manager"
254[control-plane] Creating static Pod manifest for "kube-scheduler"
255[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
256[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
257[kubelet-check] Initial timeout of 40s passed. //////////////// (note: it hangs here for a long time, then all the errors appear at once)
258error execution phase wait-control-plane: couldn't initialize a Kubernetes cluster
259
260Unfortunately, an error has occurred:
261 timed out waiting for the condition
262
263This error is likely caused by:
264 - The kubelet is not running
265 - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)
266
267If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
268 - 'systemctl status kubelet'
269 - 'journalctl -xeu kubelet'
270
271Additionally, a control plane component may have crashed or exited when started by the container runtime.
272To troubleshoot, list all containers using your preferred container runtimes CLI, e.g. docker.
273Here is one example how you may list all Kubernetes containers running in docker:
274 - 'docker ps -a | grep kube | grep -v pause'
275 Once you have found the failing container, you can inspect its logs with:
276 - 'docker logs CONTAINERID'
277INFO[2020-04-26T18:05:17+07:00] Failed resource="kubeadm:run-init"
278INFO[2020-04-26T18:05:17+07:00] Starting resource="kubeadm:config:copy"
279INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubeadm:config:copy"
280INFO[2020-04-26T18:05:17+07:00] Starting resource="kubeadm:config:set-ownership"
281INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubeadm:config:set-ownership"
282INFO[2020-04-26T18:05:17+07:00] State of Resource 'kubeadm:config:set-ownership' is Invalid.
283Explanation:
284{
285 "resource": "kubeadm:config:set-ownership",
286 "status": "Invalid",
287 "reason": "DependencyInvalid",
288 "dependencies": [
289 {
290 "resource": "kubeadm:config:copy",
291 "status": "Invalid",
292 "reason": "DependencyInvalid",
293 "dependencies": [
294 {
295 "resource": "kubeadm:run-init",
296 "status": "Invalid",
297 "reason": "ApplyError",
298 "error": "command exited with 1"
299 }
300 ]
301 }
302 ]
303}
304DEBU[2020-04-26T18:05:17+07:00] running command: sudo -n -- sh -c 'rm -f "/tmp/wks_kubeadm_init_config.yaml"'
305INFO[2020-04-26T18:05:17+07:00] Failed resource="kubeadm:init"
306INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster_v1alpha1_machineset.yaml"
307INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster_v1alpha1_machineset.yaml"
308INFO[2020-04-26T18:05:17+07:00] Starting resource="install:cni"
309INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:cni"
310INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster_v1alpha1_cluster.yaml"
311INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster_v1alpha1_cluster.yaml"
312INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster_v1alpha1_machine.yaml"
313INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster_v1alpha1_machine.yaml"
314INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster_v1alpha1_machineclass.yaml"
315INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster_v1alpha1_machineclass.yaml"
316INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster_v1alpha1_machinedeployment.yaml"
317INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster_v1alpha1_machinedeployment.yaml"
318INFO[2020-04-26T18:05:17+07:00] Starting resource="install:configmaps"
319INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:configmaps"
320INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:machines"
321INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:machines"
322INFO[2020-04-26T18:05:17+07:00] Starting resource="kubectl:apply:cluster"
323INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="kubectl:apply:cluster"
324INFO[2020-04-26T18:05:17+07:00] Starting resource="install:wks"
325INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:wks"
326INFO[2020-04-26T18:05:17+07:00] Starting resource="install:flux:flux-git-deploy-secret"
327INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:flux:flux-git-deploy-secret"
328INFO[2020-04-26T18:05:17+07:00] Starting resource="install:addons"
329INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:addons"
330INFO[2020-04-26T18:05:17+07:00] Starting resource="install:flux:main"
331INFO[2020-04-26T18:05:17+07:00] Failing (Bad Upstream Resource) resource="install:flux:main"
332INFO[2020-04-26T18:05:17+07:00] State of Resource 'install:cni' is Invalid.
333Explanation:
334{
335 "resource": "install:cni",
336 "status": "Invalid",
337 "reason": "DependencyInvalid",
338 "dependencies": [
339 {
340 "resource": "kubeadm:init",
341 "status": "Invalid",
342 "reason": "ApplyError",
343 "error": "failed to initialize Kubernetes cluster with kubeadm: Apply failed because a child failed"
344 }
345 ]
346}
347INFO[2020-04-26T18:05:17+07:00] State of Resource 'install:wks' is Invalid.
348Explanation:
349{
350 "resource": "install:wks",
351 "status": "Invalid",
352 "reason": "DependencyInvalid",
353 "dependencies": [
354 {
355 "resource": "kubectl:apply:cluster",
356 "status": "Invalid",
357 "reason": "DependencyInvalid",
358 "dependencies": [
359 {
360 "resource": "install:configmaps",
361 "status": "Invalid",
362 "reason": "DependencyInvalid",
363 "dependencies": [
364 {
365 "resource": "kubeadm:init",
366 "status": "Invalid",
367 "reason": "ApplyError",
368 "error": "failed to initialize Kubernetes cluster with kubeadm: Apply failed because a child failed"
369 },
370 {
371 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml",
372 "status": "Invalid",
373 "reason": "DependencyInvalid",
374 "dependencies": [
375 {
376 "resource": "kubeadm:init"
377 }
378 ]
379 },
380 {
381 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml",
382 "status": "Invalid",
383 "reason": "DependencyInvalid",
384 "dependencies": [
385 {
386 "resource": "kubeadm:init"
387 }
388 ]
389 },
390 {
391 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml",
392 "status": "Invalid",
393 "reason": "DependencyInvalid",
394 "dependencies": [
395 {
396 "resource": "kubeadm:init"
397 }
398 ]
399 },
400 {
401 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml",
402 "status": "Invalid",
403 "reason": "DependencyInvalid",
404 "dependencies": [
405 {
406 "resource": "kubeadm:init"
407 }
408 ]
409 },
410 {
411 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml",
412 "status": "Invalid",
413 "reason": "DependencyInvalid",
414 "dependencies": [
415 {
416 "resource": "kubeadm:init"
417 }
418 ]
419 }
420 ]
421 }
422 ]
423 },
424 {
425 "resource": "kubectl:apply:machines",
426 "status": "Invalid",
427 "reason": "DependencyInvalid",
428 "dependencies": [
429 {
430 "resource": "kubeadm:init"
431 },
432 {
433 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml"
434 },
435 {
436 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml"
437 },
438 {
439 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml"
440 },
441 {
442 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml"
443 },
444 {
445 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml"
446 }
447 ]
448 }
449 ]
450}
451INFO[2020-04-26T18:05:17+07:00] State of Resource 'install:addons' is Invalid.
452Explanation:
453{
454 "resource": "install:addons",
455 "status": "Invalid",
456 "reason": "DependencyInvalid",
457 "dependencies": [
458 {
459 "resource": "kubectl:apply:cluster",
460 "status": "Invalid",
461 "reason": "DependencyInvalid",
462 "dependencies": [
463 {
464 "resource": "install:configmaps",
465 "status": "Invalid",
466 "reason": "DependencyInvalid",
467 "dependencies": [
468 {
469 "resource": "kubeadm:init",
470 "status": "Invalid",
471 "reason": "ApplyError",
472 "error": "failed to initialize Kubernetes cluster with kubeadm: Apply failed because a child failed"
473 },
474 {
475 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml",
476 "status": "Invalid",
477 "reason": "DependencyInvalid",
478 "dependencies": [
479 {
480 "resource": "kubeadm:init"
481 }
482 ]
483 },
484 {
485 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml",
486 "status": "Invalid",
487 "reason": "DependencyInvalid",
488 "dependencies": [
489 {
490 "resource": "kubeadm:init"
491 }
492 ]
493 },
494 {
495 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml",
496 "status": "Invalid",
497 "reason": "DependencyInvalid",
498 "dependencies": [
499 {
500 "resource": "kubeadm:init"
501 }
502 ]
503 },
504 {
505 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml",
506 "status": "Invalid",
507 "reason": "DependencyInvalid",
508 "dependencies": [
509 {
510 "resource": "kubeadm:init"
511 }
512 ]
513 },
514 {
515 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml",
516 "status": "Invalid",
517 "reason": "DependencyInvalid",
518 "dependencies": [
519 {
520 "resource": "kubeadm:init"
521 }
522 ]
523 }
524 ]
525 }
526 ]
527 },
528 {
529 "resource": "kubectl:apply:machines",
530 "status": "Invalid",
531 "reason": "DependencyInvalid",
532 "dependencies": [
533 {
534 "resource": "kubeadm:init"
535 },
536 {
537 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml"
538 },
539 {
540 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml"
541 },
542 {
543 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml"
544 },
545 {
546 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml"
547 },
548 {
549 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml"
550 }
551 ]
552 }
553 ]
554}
555INFO[2020-04-26T18:05:17+07:00] State of Resource 'install:flux:main' is Invalid.
556Explanation:
557{
558 "resource": "install:flux:main",
559 "status": "Invalid",
560 "reason": "DependencyInvalid",
561 "dependencies": [
562 {
563 "resource": "install:flux:flux-git-deploy-secret",
564 "status": "Invalid",
565 "reason": "DependencyInvalid",
566 "dependencies": [
567 {
568 "resource": "kubectl:apply:cluster",
569 "status": "Invalid",
570 "reason": "DependencyInvalid",
571 "dependencies": [
572 {
573 "resource": "install:configmaps",
574 "status": "Invalid",
575 "reason": "DependencyInvalid",
576 "dependencies": [
577 {
578 "resource": "kubeadm:init",
579 "status": "Invalid",
580 "reason": "ApplyError",
581 "error": "failed to initialize Kubernetes cluster with kubeadm: Apply failed because a child failed"
582 },
583 {
584 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml",
585 "status": "Invalid",
586 "reason": "DependencyInvalid",
587 "dependencies": [
588 {
589 "resource": "kubeadm:init"
590 }
591 ]
592 },
593 {
594 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml",
595 "status": "Invalid",
596 "reason": "DependencyInvalid",
597 "dependencies": [
598 {
599 "resource": "kubeadm:init"
600 }
601 ]
602 },
603 {
604 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml",
605 "status": "Invalid",
606 "reason": "DependencyInvalid",
607 "dependencies": [
608 {
609 "resource": "kubeadm:init"
610 }
611 ]
612 },
613 {
614 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml",
615 "status": "Invalid",
616 "reason": "DependencyInvalid",
617 "dependencies": [
618 {
619 "resource": "kubeadm:init"
620 }
621 ]
622 },
623 {
624 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml",
625 "status": "Invalid",
626 "reason": "DependencyInvalid",
627 "dependencies": [
628 {
629 "resource": "kubeadm:init"
630 }
631 ]
632 }
633 ]
634 }
635 ]
636 },
637 {
638 "resource": "kubectl:apply:machines",
639 "status": "Invalid",
640 "reason": "DependencyInvalid",
641 "dependencies": [
642 {
643 "resource": "kubeadm:init"
644 },
645 {
646 "resource": "kubectl:apply:cluster_v1alpha1_cluster.yaml"
647 },
648 {
649 "resource": "kubectl:apply:cluster_v1alpha1_machine.yaml"
650 },
651 {
652 "resource": "kubectl:apply:cluster_v1alpha1_machineclass.yaml"
653 },
654 {
655 "resource": "kubectl:apply:cluster_v1alpha1_machinedeployment.yaml"
656 },
657 {
658 "resource": "kubectl:apply:cluster_v1alpha1_machineset.yaml"
659 }
660 ]
661 }
662 ]
663 }
664 ]
665}
666ERRO[2020-04-26T18:05:17+07:00] Apply of Plan failed:
667Apply failed because a child failed
668INFO[0000] wksctl version 0.8.2-beta.1 is available; please update at https://github.com/weaveworks/wksctl/releases/tag/v0.8.2-beta.1
669The kubeconfig file at "/home/toeikanta/.kube/config" has been updated
670
671toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ docker ps
672CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
6738e607c27fc1e chanwit/minifk-worker "/sbin/init" 7 minutes ago Up 7 minutes 0.0.0.0:2223->22/tcp, 0.0.0.0:6444->6443/tcp, 0.0.0.0:30081->30080/tcp, 0.0.0.0:30444->30443/tcp firekube-worker-0
6748ea56d9cb711 chanwit/minifk-master "/sbin/init" 7 minutes ago Up 7 minutes 0.0.0.0:6443->6443/tcp, 0.0.0.0:30080->30080/tcp, 0.0.0.0:30443->30443/tcp, 0.0.0.0:2222->22/tcp firekube-master-0
675
676toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$ kubectl get nodes
677Error from server (InternalError): an error on the server ("") has prevented the request from succeeding
678
679toeikanta@kanta-H97M-D3H:~/Desktop/github/minifk$