· 5 years ago · Aug 08, 2020, 03:44 PM
1########################################################
2# #
3# P R O X M O X C O M I S C S I G W C E P H #
4# #
5########################################################
6
7
8Autor: Guto Rodrigues
9E-mail: guto@newfront.com.br
10Website: www.newfront.com.br
11
12
13
141 - ADICIONAR A ENTRADA DNS NO ARQUIVO /etc/hosts DOS SERVIDORES QUE SERAO UTILIZADOS COMO GW ISCSI
15
16
17Exemplo utilizando 2 GWs
18
19#/etc/hosts node01
20
21191.253.193.156 node01.newfront.cloud node01
22191.253.193.157 node02.newfront.cloud node02
23
24#/etc/hosts node02
25
26191.253.193.156 node01.newfront.cloud node01
27191.253.193.157 node02.newfront.cloud node02
28
29
30
31
2 - INSTALAR CEPH NOS NODES, INICIALIZAR CONFIGURACAO E CRIAR MONITOR
33
34
35pveceph install --version nautilus ; pveceph init --network 191.253.193.0/24 ; pveceph mon create
36
37
382.1 - ADICIONAR OS PARAMETROS DE CONFIGURACAO EM ceph.conf
39
40
41[osd]
42 osd_heartbeat_grace = 20
43 osd_heartbeat_interval = 5
44
45
46
47
483 - INSTALAR CEPH DASHBOARD, HABILITAR O MODULO NO MGR, CRIAR CERTIFICADO AUTO ASSINADO E CONFIGURAR USUARIO E SENHA
49
50
51OBS: O módulo só será habilitado se o MGR estiver ativo
52
53
54apt-get install ceph-mgr-dashboard -y ; ceph mgr module enable dashboard ; ceph dashboard create-self-signed-cert
55
56
573.1 - CRIAR USUARIO
58
59ceph dashboard ac-user-create {user} {senha} administrator
60
61
62
63
644 - INSTALAR O PACOTE GIT PARA DESCARREGAR OS PACOTES NECESSARIOS PARA O FUNCIONAMENTO DE CEPH ISCSI
65
66
67apt install git -y
68
69
70
71
5 - DESCARREGAR, INSTALAR E HABILITAR OS PACOTES DE CEPH ISCSI GW
73
74
75#apt install python-pyudev -y ; apt install python-gobject -y ; apt install python-urwid -y ; apt install python-pyparsing -y ; apt install python-netifaces -y ; apt install python-crypto -y ; apt install python-flask -y ; apt-get install python-setuptools -y
76
77
78#git clone https://github.com/open-iscsi/tcmu-runner ; cd tcmu-runner ; ./extra/install_dep.sh ; apt install pkg-config -y ; apt-get install libglib2.0-dev -y ; cmake -Dwith-glfs=false -Dwith-qcow=false -DSUPPORT_SYSTEMD=ON -DCMAKE_INSTALL_PREFIX=/usr ; make install ; systemctl daemon-reload ; systemctl enable tcmu-runner ; systemctl start tcmu-runner ; cd /root/
79
80#git clone https://github.com/open-iscsi/rtslib-fb.git ; cd rtslib-fb ; python setup.py install ; cd /root/
81
82#git clone https://github.com/open-iscsi/configshell-fb.git ; cd configshell-fb ; python setup.py install ; cd /root
83
84#git clone https://github.com/open-iscsi/targetcli-fb.git ; cd targetcli-fb ; python setup.py install ; mkdir /etc/target ; mkdir /var/target ; cd /root/
85
86#git clone https://github.com/ceph/ceph-iscsi.git ; cd ceph-iscsi ; python setup.py install --install-scripts=/usr/bin ; cp usr/lib/systemd/system/rbd-target-gw.service /lib/systemd/system ; cp usr/lib/systemd/system/rbd-target-api.service /lib/systemd/system ; cd /root ; systemctl daemon-reload ; systemctl enable rbd-target-gw ; systemctl start rbd-target-gw ; systemctl enable rbd-target-api ; systemctl start rbd-target-api
87
88
89
90
916 - O gwcli REQUER UM POOL COM NOME "rbd" PARA QUE ELE POSSA ARMAZENAR METADADOS DA CONFIGURACAO ISCSI.
92
93
946.1 - CRIAR POOL
95
96ceph osd pool create rbd 128 replicated replicated_rule
97
98
99#INICIAR POOL
100
101rbd pool init rbd
102
103
1047 - CRIAR CONFIGURACAO DO iSCSIGW
105
106touch /etc/ceph/iscsi-gateway.cfg
107
108
109# Adicionar a conf abaixo no arquivo de configuração
110
111
112[config]
113# Name of the Ceph storage cluster. A suitable Ceph configuration file allowing
114# access to the Ceph storage cluster from the gateway node is required, if not
115# colocated on an OSD node.
116cluster_name = ceph
117
118# Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph
# directory and reference the filename here
120gateway_keyring = ceph.client.admin.keyring
121
122
123# API settings.
124# The API supports a number of options that allow you to tailor it to your
125# local environment. If you want to run the API under https, you will need to
126# create cert/key files that are compatible for each iSCSI gateway node, that is
127# not locked to a specific node. SSL cert and key files *must* be called
128# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
129# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
130# to switch to https mode.
131
# To support the API, the bare minimum settings are:
133api_secure = false
134
135# Additional API configuration options are as follows, defaults shown.
136api_user = admin
137api_password = admin
138api_port = 5001
139trusted_ip_list = 191.253.193.156,191.253.193.157
140
141
142#############
143
OBS: trusted_ip_list é uma lista de endereços IP, em cada gateway iSCSI, que será usada para operações de gerenciamento como criação de target, exportação de LUN, etc. O IP pode ser o mesmo que será usado para dados iSCSI, como comandos READ/WRITE de/para a imagem RBD, mas recomenda-se o uso de IPs separados.
145
146IMPORTANTE: O iscsi-gateway.cfg arquivo deve ser idêntico em todos os nós do "Gateway iSCSI"
147
148
149
8 - LOGADO COMO "root" EM TODOS OS NODES DO GATEWAY ISCSI, ATIVE E INICIE O SERVICO DA API
151
152systemctl daemon-reload
153systemctl enable rbd-target-gw
154systemctl start rbd-target-gw
155systemctl enable rbd-target-api
156systemctl start rbd-target-api
157
158
159
160
1619 - HABILITAR iSCSI GW NO CEPH DASHBOARD
162
163
164
165#DESABILITAR VERIFICACAO SSL
166
167
168ceph dashboard set-iscsi-api-ssl-verification false
169
170
171#ADICIONAR ISCSI GATEWAYS
172
173
174ceph dashboard iscsi-gateway-add http://admin:admin@node01.newfront.cloud:5001
175
176ceph dashboard iscsi-gateway-add http://admin:admin@node02.newfront.cloud:5001
177
178
179#LISTAR
180
181ceph dashboard iscsi-gateway-list
182
183
184#DELETAR
185
186ceph dashboard iscsi-gateway-rm node01.newfront.cloud
187
188
189
190
19110 - AJUSTAR OS PARAMETROS ABAIXO, EM /etc/iscsi/iscsid.conf PARA:
192
193
194node.startup = automatic
195node.session.timeo.replacement_timeout = 15
196
197
OBS: Caso o inicializador já esteja em uso, o ajuste tem que ser feito direto no portal
199
200Exemplo: '/etc/iscsi/nodes/<TARGET>/<PORTAL>/default'
201
202
203
20411 - OPERACOES EM GWCLI
205
206
207
208#ACESSAR GWCLI
209
210gwcli
211
212
213#CRIAR TARGET E GATEWAYS
214
215cd /iscsi-target
216
217create iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw
218
219cd iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw/gateways
220
create node01.newfront.cloud 191.253.193.156
create node02.newfront.cloud 191.253.193.157
223
224
225#CRIAR RBD BLOCK PARA COMPARTILHAR VIA ISCSI
226
227cd /disks
228
229create pool=rbd image=disk-0 size=100G
230
231
232#ACESSAR TARGET E CRIAR INITIATORS
233
234cd iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw/hosts
235
236create iqn.1994-05.com.redhat:rh7-client
237
238
239
240#ACESSAR IQN INITIATOR
241
242cd iqn.1994-05.com.redhat:rh7-client
243
244
245#VINCULAR RBD BLOCK CRIADO AO INITIATOR
246
247disk add rbd/disk-0
248
249
250
25112 - MONTAR LUN NO PROXMOX E CONFIGURAR MULTIPATH
252
253
254Pela interface do Proxmox, acessar DataCenter > Storage > Add > iSCSI
255
256ID: nome do storage
257PORTAL: ip do portal
258TARGET: selecionar target criado no gw
259
260*Desmarcar a opção "Use LUNs directly" para usar sob LVM
261
262
263
264
265#INSTALAR MULTIPATH
266
267apt-get update
268apt-get install multipath-tools
269
270
271
272
273#LISTAR WWID DO DISCO TCMU
274
275
276#IDENTIFICAR WWID
277
278/lib/udev/scsi_id -g -u -d /dev/sdx
279
280
281
282#ADICIONAR/CRIAR conf /etc/multipath.conf E ADICIONAR O CONTEUDO ABAIXO:
283
284
285blacklist {
286 wwid .*
287}
288
289blacklist_exceptions {
290 wwid "36001405e86504de4aa24417aedad09f9"
291}
292
multipaths {
        multipath {
                wwid "36001405e86504de4aa24417aedad09f9"
                alias multipath-ceph
        }
}
298
299
300
301
302#ADICIONAR WWIDS EM /etc/multipath/wwids com o comando:
303
304multipath -a 36001405e86504de4aa24417aedad09f9
305
306
307
308
309#RESTART E CHECK
310
311systemctl restart multipath-tools.service ; multipath -ll
312
313
314
315
316#FAZER WIPE NO BLOCO DO MULTIPATH
317
318
319wipefs -a /dev/mapper/multipath-ceph
320
321
322
323
32413 - CONFIGURAR LVM E CONVERTER PARA THIN
325
326
327
328#CRIAR PV
329
330pvcreate /dev/mapper/multipath-ceph
331
332#CRIAR VG
333
334vgcreate ISCSI /dev/mapper/multipath-ceph
335
336#CRIAR LV
337
338lvcreate ISCSI -n LVM-OVER-ISCSI -L 199G
339
340
341#CONVERTER PARA THIN
342
343lvconvert --type thin-pool /dev/mapper/ISCSI-LVM--OVER--ISCSI -y
344
345
346
347
34814 - ADICIONAR STORAGE LVM Thin NO PROXMOX
349
350
351pvesm add lvmthin LVM-THIN --vgname ISCSI --thinpool LVM-OVER-ISCSI --nodes {node-name}
352