· 5 years ago · Aug 08, 2020, 03:38 PM
1########################################################
2# #
3# P R O X M O X C O M I S C S I G W C E P H #
4# #
5########################################################
6
7
8
9
101 - ADICIONAR A ENTRADA DNS NO ARQUIVO /etc/hosts DOS SERVIDORES QUE SERAO UTILIZADOS COMO GW ISCSI
11
12
13Exemplo utilizando 2 GWs
14
15#/etc/hosts node01
16
17191.253.193.156 node01.newfront.cloud node01
18191.253.193.157 node02.newfront.cloud node02
19
20#/etc/hosts node02
21
22191.253.193.156 node01.newfront.cloud node01
23191.253.193.157 node02.newfront.cloud node02
24
25
26
27
282 - INSTALAR CEPH NOS NODES, INICIALIZAR CONFIGURACAO E CRIAR MONITOR
29
30
31pveceph install --version nautilus ; pveceph init --network 191.253.193.0/24 ; pveceph mon create
32
33
342.1 - ADICIONAR OS PARAMETROS DE CONFIGURACAO EM ceph.conf
35
36
37[osd]
38 osd_heartbeat_grace = 20
39 osd_heartbeat_interval = 5
40
41
42
43
443 - INSTALAR CEPH DASHBOARD, HABILITAR O MODULO NO MGR, CRIAR CERTIFICADO AUTO ASSINADO E CONFIGURAR USUARIO E SENHA
45
46
47OBS: O módulo só será habilitado se o MGR estiver ativo
48
49
50apt-get install ceph-mgr-dashboard -y ; ceph mgr module enable dashboard ; ceph dashboard create-self-signed-cert
51
52
533.1 - CRIAR USUARIO
54
55ceph dashboard ac-user-create {user} {senha} administrator
56
57
58
59
604 - INSTALAR O PACOTE GIT PARA DESCARREGAR OS PACOTES NECESSARIOS PARA O FUNCIONAMENTO DE CEPH ISCSI
61
62
63apt install git -y
64
65
66
67
685 - DESCARREGAR E INSTALAR E HABILITAR OS PACOTES DE CEPH ISCSI GW
69
70
71#apt install python-pyudev -y ; apt install python-gobject -y ; apt install python-urwid -y ; apt install python-pyparsing -y ; apt install python-netifaces -y ; apt install python-crypto -y ; apt install python-flask -y ; apt-get install python-setuptools -y
72
73
74#git clone https://github.com/open-iscsi/tcmu-runner ; cd tcmu-runner ; ./extra/install_dep.sh ; apt install pkg-config -y ; apt-get install libglib2.0-dev -y ; cmake -Dwith-glfs=false -Dwith-qcow=false -DSUPPORT_SYSTEMD=ON -DCMAKE_INSTALL_PREFIX=/usr ; make install ; systemctl daemon-reload ; systemctl enable tcmu-runner ; systemctl start tcmu-runner ; cd /root/
75
76#git clone https://github.com/open-iscsi/rtslib-fb.git ; cd rtslib-fb ; python setup.py install ; cd /root/
77
78#git clone https://github.com/open-iscsi/configshell-fb.git ; cd configshell-fb ; python setup.py install ; cd /root
79
80#git clone https://github.com/open-iscsi/targetcli-fb.git ; cd targetcli-fb ; python setup.py install ; mkdir /etc/target ; mkdir /var/target ; cd /root/
81
82#git clone https://github.com/ceph/ceph-iscsi.git ; cd ceph-iscsi ; python setup.py install --install-scripts=/usr/bin ; cp usr/lib/systemd/system/rbd-target-gw.service /lib/systemd/system ; cp usr/lib/systemd/system/rbd-target-api.service /lib/systemd/system ; cd /root ; systemctl daemon-reload ; systemctl enable rbd-target-gw ; systemctl start rbd-target-gw ; systemctl enable rbd-target-api ; systemctl start rbd-target-api
83
84
85
86
876 - O gwcli REQUER UM POOL COM NOME "rbd" PARA QUE ELE POSSA ARMAZENAR METADADOS DA CONFIGURACAO ISCSI.
88
89
906.1 - CRIAR POOL
91
92ceph osd pool create rbd 128 replicated replicated_rule
93
94
95#INICIAR POOL
96
97rbd pool init rbd
98
99
1007 - CRIAR CONFIGURACAO DO iSCSIGW
101
102touch /etc/ceph/iscsi-gateway.cfg
103
104
105# Adicionar a conf abaixo no arquivo de configuração
106
107
108[config]
109# Name of the Ceph storage cluster. A suitable Ceph configuration file allowing
110# access to the Ceph storage cluster from the gateway node is required, if not
111# colocated on an OSD node.
112cluster_name = ceph
113
114# Place a copy of the ceph cluster's admin keyring in the gateway's /etc/ceph
115# directory and reference the filename here
116gateway_keyring = ceph.client.admin.keyring
117
118
119# API settings.
120# The API supports a number of options that allow you to tailor it to your
121# local environment. If you want to run the API under https, you will need to
122# create cert/key files that are compatible for each iSCSI gateway node, that is
123# not locked to a specific node. SSL cert and key files *must* be called
124# 'iscsi-gateway.crt' and 'iscsi-gateway.key' and placed in the '/etc/ceph/' directory
125# on *each* gateway node. With the SSL files in place, you can use 'api_secure = true'
126# to switch to https mode.
127
128# To support the API, the bare minimum settings are:
129api_secure = false
130
131# Additional API configuration options are as follows, defaults shown.
132api_user = admin
133api_password = admin
134api_port = 5001
135trusted_ip_list = 191.253.193.156,191.253.193.157
136
137
138#############
139
140OBS: trusted_ip_list é uma lista de endereços IP em cada gateway iscsi que será usado para operações de gerenciamento como criação de destino, exportação lun, etc. O IP pode ser o mesmo que será usado para dados iSCSI, como comandos READ/WRITE de/para a imagem RBD, mas recomenda-se o uso de IPs separados.
141
142IMPORTANTE: O iscsi-gateway.cfg arquivo deve ser idêntico em todos os nós do "Gateway iSCSI"
143
144
145
1468 - LOGADO COMO "root" EM TODOS OS NODES DO GATEWAY ISCSI, ATIVE E INICIE O SERVICO DA API
147
148systemctl daemon-reload
149systemctl enable rbd-target-gw
150systemctl start rbd-target-gw
151systemctl enable rbd-target-api
152systemctl start rbd-target-api
153
154
155
156
1579 - HABILITAR iSCSI GW NO CEPH DASHBOARD
158
159
160
161#DESABILITAR VERIFICACAO SSL
162
163
164ceph dashboard set-iscsi-api-ssl-verification false
165
166
167#ADICIONAR ISCSI GATEWAYS
168
169
170ceph dashboard iscsi-gateway-add http://admin:admin@node01.newfront.cloud:5001
171
172ceph dashboard iscsi-gateway-add http://admin:admin@node02.newfront.cloud:5001
173
174
175#LISTAR
176
177ceph dashboard iscsi-gateway-list
178
179
180#DELETAR
181
182ceph dashboard iscsi-gateway-rm node01.newfront.cloud
183
184
185
186
18710 - AJUSTAR OS PARAMETROS ABAIXO, EM /etc/iscsi/iscsid.conf PARA:
188
189
190node.startup = automatic
191node.session.timeo.replacement_timeout = 15
192
193
194OBS: Caso o inicializador já esteja em uso, o ajuste tem que ser feito direto no portal
195
196Exemplo: '/etc/iscsi/nodes/<TARGET>/<PORTAL>/default'
197
198
199
20011 - OPERACOES EM GWCLI
201
202
203
204#ACESSAR GWCLI
205
206gwcli
207
208
209#CRIAR TARGET E GATEWAYS
210
211cd /iscsi-target
212
213create iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw
214
215cd iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw/gateways
216
217create node01.newfront.cloud 191.253.193.156
218create node02.newfront.cloud 191.253.193.157
219
220
221#CRIAR RBD BLOCK PARA COMPARTILHAR VIA ISCSI
222
223cd /disks
224
225create pool=rbd image=disk-0 size=100G
226
227
228#ACESSAR TARGET E CRIAR INITIATORS
229
230cd iqn.2020-08.br.com.newfront.iscsi-gw:iscsi-igw/hosts
231
232create iqn.1994-05.com.redhat:rh7-client
233
234
235
236#ACESSAR IQN INITIATOR
237
238cd iqn.1994-05.com.redhat:rh7-client
239
240
241#VINCULAR RBD BLOCK CRIADO AO INITIATOR
242
243disk add rbd/disk-0
244
245
246
24712 - MONTAR LUN NO PROXMOX E CONFIGURAR MULTIPATH
248
249
250Pela interface do Proxmox, acessar DataCenter > Storage > Add > iSCSI
251
252ID: nome do storage
253PORTAL: ip do portal
254TARGET: selecionar target criado no gw
255
256*Desmarcar a opção "Use LUNs directly" para usar sob LVM
257
258
259
260
261#INSTALAR MULTIPATH
262
263apt-get update
264apt-get install multipath-tools
265
266
267
268
269#LISTAR WWID DO DISCO TCMU
270
271
272#IDENTIFICAR WWID
273
274/lib/udev/scsi_id -g -u -d /dev/sdx
275
276
277
278#ADICIONAR/CRIAR conf /etc/multipath.conf E ADICIONAR O CONTEUDO ABAIXO:
279
280
281blacklist {
282 wwid .*
283}
284
285blacklist_exceptions {
286 wwid "36001405e86504de4aa24417aedad09f9"
287}
288
289multipaths {
290	multipath {
291		wwid "36001405e86504de4aa24417aedad09f9"
292		alias multipath-ceph
293	}
294}
295
296
297
298#ADICIONAR WWIDS EM /etc/multipath/wwids com o comando:
299
300multipath -a 36001405e86504de4aa24417aedad09f9
301
302
303
304
305#RESTART E CHECK
306
307systemctl restart multipath-tools.service ; multipath -ll
308
309
310
311
312#FAZER WIPE NO BLOCO DO MULTIPATH
313
314
315wipefs -a /dev/mapper/multipath-ceph
316
317
318
319
32013 - CONFIGURAR LVM E CONVERTER PARA THIN
321
322
323
324#CRIAR PV
325
326pvcreate /dev/mapper/multipath-ceph
327
328#CRIAR VG
329
330vgcreate ISCSI /dev/mapper/multipath-ceph
331
332#CRIAR LV
333
334lvcreate ISCSI -n LVM-OVER-ISCSI -L 199G
335
336
337#CONVERTER PARA THIN
338
339lvconvert --type thin-pool /dev/mapper/ISCSI-LVM--OVER--ISCSI -y
340
341
342
343
34414 - ADICIONAR STORAGE LVM Thin NO PROXMOX
345
346
347pvesm add lvmthin LVM-THIN --vgname ISCSI --thinpool LVM-OVER-ISCSI --nodes {node-name}