· 6 years ago · Mar 01, 2020, 12:28 AM
1/*
2 * Adaptec AAC series RAID controller driver
3 * (c) Copyright 2001 Red Hat Inc. <alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000-2010 Adaptec, Inc. (aacraid@adaptec.com)
9 * Copyright (c) 2010-2015 PMC-Sierra, Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 * Module Name:
26 * aachba.c
27 *
28 * Abstract: Contains Interfaces to manage IOs.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35//#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/version.h> /* For the following test */
40#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,2))
41#include <linux/completion.h>
42#endif
43#include <linux/blkdev.h>
44#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
45#include <asm/semaphore.h>
46#endif
47#include <asm/uaccess.h>
48#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,16))
49#include <linux/highmem.h> /* For flush_kernel_dcache_page */
50#endif
51#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)))
52#include <linux/module.h>
53#endif
54#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
55#define MAJOR_NR SCSI_DISK0_MAJOR /* For DEVICE_NR() */
56#include <linux/blk.h> /* for DEVICE_NR & io_request_lock definition */
57#include "scsi.h"
58#include "hosts.h"
59#include "sd.h"
60#define no_uld_attach hostdata
61#else
62#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6))
63#include <linux/moduleparam.h>
64#endif
65#include <scsi/scsi.h>
66#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,1)) && !defined(DID_OK))
67#define DID_OK 0x00
68#define DID_NO_CONNECT 0x01
69#define DID_TIME_OUT 0x03
70#define DID_BAD_TARGET 0x04
71#define DID_ABORT 0x05
72#define DID_PARITY 0x06
73#define DID_ERROR 0x07
74#define DID_RESET 0x08
75#define SUCCESS 0x2002
76#define FAILED 0x2003
77#define SCSI_MLQUEUE_DEVICE_BUSY 0x1056
78#define SCSI_MLQUEUE_HOST_BUSY 0x1055
79#endif
80#include <scsi/scsi_cmnd.h>
81#include <scsi/scsi_device.h>
82#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)) && defined(DID_BUS_BUSY) && !defined(BLIST_NO_ULD_ATTACH))
83#include <scsi/scsi_devinfo.h> /* Pick up BLIST_NO_ULD_ATTACH? */
84#endif
85#include <scsi/scsi_host.h>
86#if (!defined(CONFIG_COMMUNITY_KERNEL))
87#include <scsi/scsi_tcq.h> /* For MSG_ORDERED_TAG */
88#endif
89#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)) && !defined(BLIST_NO_ULD_ATTACH))
90#define no_uld_attach inq_periph_qual
91#elif ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)) && !defined(BLIST_NO_ULD_ATTACH))
92#define no_uld_attach hostdata
93#endif
94#endif
95#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) && defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
96#include "scsi_priv.h" /* For SCSI_CMND_MAGIC */
97#endif
98#if (!defined(CONFIG_COMMUNITY_KERNEL))
99#if (defined(MODULE))
100#include <linux/proc_fs.h>
101#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
102#include <linux/smp_lock.h>
103#else
104#include <linux/mutex.h>
105#endif
106#endif
107#if (defined(HAS_BOOTSETUP_H))
108#include <asm/bootsetup.h>
109#elif (!defined(HAS_NOT_SETUP))
110#include <asm/setup.h>
111#endif
112#ifndef COMMAND_LINE_SIZE
113# define COMMAND_LINE_SIZE 256
114#endif
115#endif
116
117#include "aacraid.h"
118#if (!defined(CONFIG_COMMUNITY_KERNEL))
119#include "fwdebug.h"
120#endif
121
122/* values for inqd_pdt: Peripheral device type in plain English */
123#define INQD_PDT_DA 0x00 /* Direct-access (DISK) device */
124#define INQD_PDT_PROC 0x03 /* Processor device */
125#define INQD_PDT_CHNGR 0x08 /* Changer (jukebox, scsi2) */
126#define INQD_PDT_COMM 0x09 /* Communication device (scsi2) */
127#define INQD_PDT_NOLUN2 0x1f /* Unknown Device (scsi2) */
128#define INQD_PDT_NOLUN 0x7f /* Logical Unit Not Present */
129
130#define INQD_PDT_DMASK 0x1F /* Peripheral Device Type Mask */
131#define INQD_PDT_QMASK 0xE0 /* Peripheral Device Qualifer Mask */
132
133/*
134 * Sense codes
135 */
136
137#define SENCODE_NO_SENSE 0x00
138#define SENCODE_END_OF_DATA 0x00
139#define SENCODE_BECOMING_READY 0x04
140#define SENCODE_INIT_CMD_REQUIRED 0x04
141#define SENCODE_UNRECOVERED_READ_ERROR 0x11
142#if (!defined(CONFIG_COMMUNITY_KERNEL))
143#define SENCODE_DATA_PROTECT 0x0E
144#endif
145#define SENCODE_PARAM_LIST_LENGTH_ERROR 0x1A
146#define SENCODE_INVALID_COMMAND 0x20
147#define SENCODE_LBA_OUT_OF_RANGE 0x21
148#define SENCODE_INVALID_CDB_FIELD 0x24
149#define SENCODE_LUN_NOT_SUPPORTED 0x25
150#define SENCODE_INVALID_PARAM_FIELD 0x26
151#define SENCODE_PARAM_NOT_SUPPORTED 0x26
152#define SENCODE_PARAM_VALUE_INVALID 0x26
153#define SENCODE_RESET_OCCURRED 0x29
154#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x3E
155#define SENCODE_INQUIRY_DATA_CHANGED 0x3F
156#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x39
157#define SENCODE_DIAGNOSTIC_FAILURE 0x40
158#define SENCODE_INTERNAL_TARGET_FAILURE 0x44
159#define SENCODE_INVALID_MESSAGE_ERROR 0x49
160#define SENCODE_LUN_FAILED_SELF_CONFIG 0x4c
161#define SENCODE_OVERLAPPED_COMMAND 0x4E
162
163/*
164 * Additional sense codes
165 */
166
167#define ASENCODE_NO_SENSE 0x00
168#define ASENCODE_END_OF_DATA 0x05
169#define ASENCODE_BECOMING_READY 0x01
170#define ASENCODE_INIT_CMD_REQUIRED 0x02
171#define ASENCODE_PARAM_LIST_LENGTH_ERROR 0x00
172#define ASENCODE_INVALID_COMMAND 0x00
173#define ASENCODE_LBA_OUT_OF_RANGE 0x00
174#define ASENCODE_INVALID_CDB_FIELD 0x00
175#define ASENCODE_LUN_NOT_SUPPORTED 0x00
176#define ASENCODE_INVALID_PARAM_FIELD 0x00
177#define ASENCODE_PARAM_NOT_SUPPORTED 0x01
178#define ASENCODE_PARAM_VALUE_INVALID 0x02
179#define ASENCODE_RESET_OCCURRED 0x00
180#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET 0x00
181#define ASENCODE_INQUIRY_DATA_CHANGED 0x03
182#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED 0x00
183#define ASENCODE_DIAGNOSTIC_FAILURE 0x80
184#define ASENCODE_INTERNAL_TARGET_FAILURE 0x00
185#define ASENCODE_INVALID_MESSAGE_ERROR 0x00
186#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
187#define ASENCODE_OVERLAPPED_COMMAND 0x00
188
189#define BYTE0(x) (unsigned char)(x)
190#define BYTE1(x) (unsigned char)((x) >> 8)
191#define BYTE2(x) (unsigned char)((x) >> 16)
192#define BYTE3(x) (unsigned char)((x) >> 24)
193
194#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
195/* compatibility */
196#ifndef SAM_STAT_CHECK_CONDITION
197# define SAM_STAT_CHECK_CONDITION (CHECK_CONDITION << 1)
198#endif
199#ifndef SAM_STAT_GOOD
200# define SAM_STAT_GOOD (GOOD << 1)
201#endif
202#ifndef SAM_STAT_TASK_SET_FULL
203# define SAM_STAT_TASK_SET_FULL (QUEUE_FULL << 1)
204#endif
205#ifndef SAM_STAT_BUSY
206# define SAM_STAT_BUSY (BUSY << 1)
207#endif
208#ifndef SAM_STAT_RESERVATION_CONFLICT
209# define SAM_STAT_RESERVATION_CONFLICT (RESERVATION_CONFLICT << 1)
210#endif
211#ifndef SAM_STAT_TASK_ABORTED
212# define SAM_STAT_TASK_ABORTED (TASK_ABORTED << 1)
213#endif
214
215#endif
216
217/* ATA pass thru commands */
218#ifndef ATA_12
219#define ATA_12 0xa1 /* 12-byte pass-thru */
220#endif
221
222#ifndef ATA_16
223#define ATA_16 0x85 /* 16-byte pass-thru */
224#endif
225
226/* MODE_SENSE data format */
typedef struct {
	/* 4-byte MODE SENSE(6) parameter header (layout per SPC —
	 * TODO confirm against the firmware interface spec) */
	struct {
		u8 data_length;		/* length of the data that follows this byte */
		u8 med_type;		/* medium type */
		u8 dev_par;		/* device-specific parameter byte */
		u8 bd_length;		/* block descriptor length in bytes */
	} __attribute__((packed)) hd;
	/* single 8-byte short-LBA block descriptor */
	struct {
		u8 dens_code;		/* density code */
		u8 block_count[3];	/* number of blocks, 24-bit big-endian per SPC */
		u8 reserved;
		u8 block_length[3];	/* block length, 24-bit big-endian per SPC */
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];			/* mode page header/data returned to the initiator */
} __attribute__((packed)) aac_modep_data;
242
243/* MODE_SENSE_10 data format */
typedef struct {
	/* 8-byte MODE SENSE(10) parameter header; like the 6-byte form but
	 * with 2-byte length fields (layout per SPC — TODO confirm) */
	struct {
		u8 data_length[2];	/* length of the data that follows, big-endian */
		u8 med_type;		/* medium type */
		u8 dev_par;		/* device-specific parameter byte */
		u8 rsrvd[2];
		u8 bd_length[2];	/* block descriptor length in bytes, big-endian */
	} __attribute__((packed)) hd;
	/* single 8-byte short-LBA block descriptor */
	struct {
		u8 dens_code;		/* density code */
		u8 block_count[3];	/* number of blocks, 24-bit big-endian per SPC */
		u8 reserved;
		u8 block_length[3];	/* block length, 24-bit big-endian per SPC */
	} __attribute__((packed)) bd;
	u8 mpc_buf[3];			/* mode page header/data returned to the initiator */
} __attribute__((packed)) aac_modep10_data;
260
261/*------------------------------------------------------------------------------
262 * S T R U C T S / T Y P E D E F S
263 *----------------------------------------------------------------------------*/
264/* SCSI inquiry data */
/* Standard INQUIRY response payload built by the driver for emulated
 * (array/container) devices. Field layout follows the SCSI standard
 * INQUIRY data format. */
struct inquiry_data {
	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
	u8 inqd_len;	/* Additional length (n-4) */
	u8 inqd_pad1[2];/* Reserved - must be zero */
	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
	u8 inqd_vid[8];	/* Vendor ID */
	u8 inqd_pid[16];/* Product ID */
	u8 inqd_prl[4];	/* Product Revision Level */
};
277
278/* Excluding SUSE as it has issues when inbox driver does not have this support but outbox has it.
279 Because SUSE uses /dev/disk/by-id mapping entries in the OS grub config and VPD 0X83 creates conflicts */
280#if (!defined(CONFIG_SUSE_KERNEL))
281/* Added for VPD 0x83 */
/*
 * Identification descriptors for the INQUIRY VPD page 0x83 (Device
 * Identification) response. Bitfield layout assumes the compiler packs
 * the two 4-bit fields into one byte in declaration order — matches the
 * existing on-wire use in this driver; do not reorder.
 */

/* Type 1: T10 vendor-ID based descriptor (vendor + product + ASCII SN) */
typedef struct {
	u8 CodeSet : 4;		/* VPD_CODE_SET */
	u8 Reserved : 4;
	u8 IdentifierType : 4;	/* VPD_IDENTIFIER_TYPE */
	u8 Reserved2 : 4;
	u8 Reserved3;
	u8 IdentifierLength;	/* length of the identifier that follows */
	u8 VendId[8];		/* vendor identification */
	u8 ProductId[16];	/* product identification */
	u8 SerialNumber[8];	/* serial number in ASCII */

} TVPD_ID_Descriptor_Type_1;

/* Type 2: EUI-64 style descriptor built from a 32-bit serial number */
typedef struct {
	u8 CodeSet : 4;		/* VPD_CODE_SET */
	u8 Reserved : 4;
	u8 IdentifierType : 4;	/* VPD_IDENTIFIER_TYPE */
	u8 Reserved2 : 4;
	u8 Reserved3;
	u8 IdentifierLength;	/* length of the identifier that follows */
	struct TEU64Id {
		u32 Serial;
		/* The serial number is supposed to be 40 bits, but we only
		 * support 32, so keep the last byte zero. */
		u8 Reserved;
		u8 VendId[3];	/* IEEE company ID portion */
	} EU64Id;

} TVPD_ID_Descriptor_Type_2;

/* Type 3: opaque 16-byte identifier descriptor */
typedef struct {
	u8 CodeSet : 4;		/* VPD_CODE_SET */
	u8 Reserved : 4;
	u8 IdentifierType : 4;	/* VPD_IDENTIFIER_TYPE */
	u8 Reserved2 : 4;
	u8 Reserved3;
	u8 IdentifierLength;	/* length of the identifier that follows */
	u8 Identifier[16];
} TVPD_ID_Descriptor_Type_3;

/* Complete VPD page 0x83 response: page header + the three descriptors */
typedef struct {
	u8 DeviceType : 5;		/* peripheral device type */
	u8 DeviceTypeQualifier : 3;	/* peripheral qualifier */
	u8 PageCode;			/* 0x83 */
	u8 Reserved;
	u8 PageLength;			/* length of the descriptor list */
	TVPD_ID_Descriptor_Type_1 IdDescriptorType1;
	TVPD_ID_Descriptor_Type_2 IdDescriptorType2;
	TVPD_ID_Descriptor_Type_3 IdDescriptorType3;
} TVPD_Page83;
330
331#endif
332
333
334/*
335 * M O D U L E G L O B A L S
336 */
337
338#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
339static int aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
340static int aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
341static int aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
342static int aac_build_sgraw2(struct scsi_cmnd* scsicmd, struct aac_raw_io2* rio2, int sg_max);
343static int aac_build_sghba(struct scsi_cmnd* scsicmd, struct aac_hba_cmd_req * hbacmd, int sg_max, u64 sg_address);
344static int aac_scsi_cmd(struct scsi_cmnd * scsicmd);
345static int aac_scsi_cmd_apre(struct scsi_cmnd * scsicmd);
346#else
347static long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
348static long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
349static long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
350static long aac_build_sgraw2(struct scsi_cmnd* scsicmd, struct aac_raw_io2* rio2, int sg_max);
351static long aac_build_sghba(struct scsi_cmnd* scsicmd, struct aac_hba_cmd_req * hbacmd, int sg_max, u64 sg_address);
352#endif
353static int aac_convert_sgraw2(struct aac_raw_io2* rio2, int pages, int nseg, int nseg_new);
354static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
355static int aac_send_hba_fib(struct scsi_cmnd* scsicmd);
356#ifdef AAC_DETAILED_STATUS_INFO
357static char *aac_get_status_string(u32 status);
358#endif
359
360/*
361 * Non dasd selection is handled entirely in aachba now
362 */
363
364static int nondasd = -1;
365static int aac_cache = 2; /* WCE=0 to avoid performance problems */
366static int dacmode = -1;
367#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)) || defined(PCI_HAS_ENABLE_MSI) || defined(PCI_HAS_DISABLE_MSI))
368int aac_msi;
369#else
370
371#endif
372#if (defined(__arm__) || defined(CONFIG_EXTERNAL))
373int aac_commit = 1;
374int startup_timeout = 540;
375int aif_timeout = 540;
376#else
377int aac_commit = -1;
378int startup_timeout = 180;
379int aif_timeout = 120;
380#endif
381
382int aac_sync_mode = 0; /* only sync. transfer - disabled */
383int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
384int aac_hba_mode = 1;
385int aac_fib_dump = 0; /* Do fib dump before IOP_RESET*/
386
387#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
388module_param(nondasd, int, S_IRUGO|S_IWUSR);
389#else
390MODULE_PARM(nondasd, "i");
391#endif
392MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
393 " 0=off, 1=on");
394#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
395module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
396MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
397 "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
398 "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
399 "\tbit 2 - Disable only if Battery is protecting Cache");
400#else
401MODULE_PARM(aac_cache, "i");
402MODULE_PARM_DESC(aac_cache, "Disable Queue Flush commands:\n"
403 "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
404 "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
405 "\tbit 2 - Disable only if Battery is protecting Cache");
406#endif
407#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
408module_param(dacmode, int, S_IRUGO|S_IWUSR);
409#else
410MODULE_PARM(dacmode, "i");
411#endif
412MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
413 " 0=off, 1=on");
414#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
415module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
416#else
417MODULE_PARM(aac_sync_mode, "i");
418#endif
419MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
420 " 0=off, 1=on");
421#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
422module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
423#else
424MODULE_PARM(aac_convert_sgl, "i");
425#endif
426MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
427 " 0=off, 1=on");
428#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
429module_param(aac_hba_mode, int, S_IRUGO|S_IWUSR);
430#else
431MODULE_PARM(aac_hba_mode, "i");
432#endif
433MODULE_PARM_DESC(aac_hba_mode, "HBA (bypass) mode support"
434 " 0=off, 1=on");
435#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
436module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
437MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
438 " adapter for foreign arrays.\n"
439 "This is typically needed in systems that do not have a BIOS."
440 " 0=off, 1=on");
441#else
442MODULE_PARM(aac_commit, "i");
443MODULE_PARM_DESC(aac_commit, "Control whether a COMMIT_CONFIG is issued to the"
444 " adapter for foreign arrays.\n"
445 "This is typically needed in systems that do not have a BIOS."
446 " 0=off, 1=on");
447#endif
448#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)) || defined(PCI_HAS_ENABLE_MSI) || defined(PCI_HAS_DISABLE_MSI))
449#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
450module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
451MODULE_PARM_DESC(msi, "IRQ handling."
452 " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
453#else
454MODULE_PARM(aac_msi, "i");
455MODULE_PARM_DESC(aac_msi, "IRQ handling."
456 " 0=PIC(default), 1=MSI, 2=MSI-X(unsupported, uses MSI)");
457#endif
458#endif
459#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
460module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
461#else
462MODULE_PARM(startup_timeout, "i");
463#endif
464MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
465 " adapter to have it's kernel up and\n"
466 "running. This is typically adjusted for large systems that do not"
467 " have a BIOS.");
468#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
469module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
470#else
471MODULE_PARM(aif_timeout, "i");
472#endif
473MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
474 " applications to pick up AIFs before\n"
475 "deregistering them. This is typically adjusted for heavily burdened"
476 " systems.");
477#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
478module_param(aac_fib_dump, int, S_IRUGO|S_IWUSR);
479#else
480MODULE_PARM(aac_fib_dump, "i");
481#endif
482MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET"
483 " 0=off, 1=on");
484
485#if (!defined(CONFIG_COMMUNITY_KERNEL))
486#if (defined(__arm__) || defined(CONFIG_EXTERNAL) || (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
487static int coalescethreshold = 0;
488#else
489static int coalescethreshold = 16; /* 8KB coalesce knee */
490#endif
491#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
492module_param(coalescethreshold, int, S_IRUGO|S_IWUSR);
493#else
494MODULE_PARM(coalescethreshold, "i");
495#endif
496MODULE_PARM_DESC(coalescethreshold, "Control the maximum block size of"
497 " sequential requests that are fed back to the scsi_merge layer for"
498 " coalescing. 0=off, 16 block (8KB) default.");
499
500#endif
501int numacb = -1;
502#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
503module_param(numacb, int, S_IRUGO|S_IWUSR);
504#else
505MODULE_PARM(numacb, "i");
506#endif
507MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
508 " blocks (FIB) allocated. Valid values are 512 and down. Default is"
509 " to use suggestion from Firmware.");
510
511#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX__))
512int aac_remove_devnodes = 0;
513#else
514int aac_remove_devnodes = 1;
515#endif
516#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
517module_param(aac_remove_devnodes, int, S_IRUGO|S_IWUSR);
518#else
519MODULE_PARM(aac_remove_devnodes, "i");
520#endif
521MODULE_PARM_DESC(aac_remove_devnodes, "Remove device nodes(/dev/sd* and /dev/sg*) permanently when the device goes to offline state."
522 " 0=off, 1=on(Default).");
523
524int update_interval = 30 * 60;
525#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
526module_param(update_interval, int, S_IRUGO|S_IWUSR);
527#else
528MODULE_PARM(update_interval, "i");
529#endif
530MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
531 " updates issued to adapter.");
532
533int check_interval = 24 * 60 * 60;
534#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
535module_param(check_interval, int, S_IRUGO|S_IWUSR);
536#else
537MODULE_PARM(check_interval, "i");
538#endif
539MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
540 " checks.");
541
542int aac_check_reset = 1;
543#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
544module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
545MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
546 " adapter. a value of -1 forces the reset to adapters programmed to"
547 " ignore it.");
548#else
549MODULE_PARM(aac_check_reset, "i");
550MODULE_PARM_DESC(aac_check_reset, "If adapter fails health check, reset the"
551 " adapter. a value of -1 forces the reset to adapters programmed to"
552 " ignore it.");
553#endif
/* extendedtimeout: optional override for I/O request timeouts.
 * Fix: "issed" -> "issued" in the user-visible parameter description. */
#if (defined(AAC_EXTENDED_TIMEOUT))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
int extendedtimeout = -1;
module_param(extendedtimeout, int, S_IRUGO|S_IWUSR);
#else
static int extendedtimeout = -1;
MODULE_PARM(extendedtimeout, "i");
#endif
MODULE_PARM_DESC(extendedtimeout, "Request a specific timeout to override I/O"
	" requests issued to the adapter.");
#endif
566
567#if (defined(HAS_BOOT_CONFIG) || (defined(BOOTCD) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))))
568int expose_physicals = 0;
569#else
570int expose_physicals = -1;
571#endif
572#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
573module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
574#else
575MODULE_PARM(expose_physicals, "i");
576#endif
577MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
578 " -1=protect 0=off, 1=on");
579
580#if (defined(HAS_BOOT_CONFIG) || (defined(BOOTCD) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7))))
581int expose_hidden_space = 0;
582#else
583int expose_hidden_space = -1;
584#endif
585#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
586module_param(expose_hidden_space, int, S_IRUGO|S_IWUSR);
587#else
588MODULE_PARM(expose_hidden_space, "i");
589#endif
590MODULE_PARM_DESC(expose_hidden_space, "Expose hidden space of the Array."
591 " -1=protect 0=off, 1=on");
592
593int aac_reset_devices;
594#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
595module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
596MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
597#else
598MODULE_PARM(aac_reset_devices, "i");
599MODULE_PARM_DESC(aac_reset_devices, "Force an adapter reset at initialization.");
600#endif
601
602#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)) && (defined(CONFIG_SLES_KERNEL) || defined(CONFIG_SUSE_KERNEL)) && defined(CONFIG_SLE_SP))
603#if (CONFIG_SLE_SP == 1)
604int aac_wwn = 2;
605#else
606int aac_wwn = 1;
607#endif
608#elif (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
609int aac_wwn = 1;
610#else
611int aac_wwn = 1;
612#endif
613#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
614module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
615#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,16)) && (defined(CONFIG_SLES_KERNEL) || defined(CONFIG_SUSE_KERNEL)) && defined(CONFIG_SLE_SP))
616#if (CONFIG_SLE_SP == 1)
617MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
618 "\t0 - Disable\n"
619 "\t1 - Array Meta Data Signature\n"
620 "\t2 - Adapter Serial Number (default)");
621#else
622MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
623 "\t0 - Disable\n"
624 "\t1 - Array Meta Data Signature (default)\n"
625 "\t2 - Adapter Serial Number");
626#endif
627#else
628MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
629 "\t0 - Disable\n"
630 "\t1 - Array Meta Data Signature (default)\n"
631 "\t2 - Adapter Serial Number");
632#endif
633#else
634MODULE_PARM(aac_wwn, "i");
635#if ((LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21)) && (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__)))
636MODULE_PARM_DESC(aac_wwn, "Select a WWN type for the arrays:\n"
637 "\t0 - Disable (default)\n"
638 "\t1 - Array Meta Data Signature\n"
639 "\t2 - Adapter Serial Number");
640#else
641MODULE_PARM_DESC(aac_wwn, "Select a WWN type for the arrays:\n"
642 "\t0 - Disable\n"
643 "\t1 - Array Meta Data Signature (default)\n"
644 "\t2 - Adapter Serial Number");
645#endif
646#endif
647
648
649#if (!defined(CONFIG_COMMUNITY_KERNEL) && !defined(__VMKLNX30__) && !defined(__VMKLNX__) && ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || !defined(HAS_BOOT_CONFIG)))
650static char * aacraid;
651
652static int aacraid_setup(struct aac_dev *dev, char *str)
653{
654 int i;
655 char *key;
656 char *value;
657#if (defined(CONFIG_SLES_KERNEL) || defined(CONFIG_SUSE_KERNEL))
658 static int dud = 0;
659#endif
660 struct {
661 char * option_name;
662 int * option_flag;
663 int option_value;
664 } options[] = {
665#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
666 { "nondasd", &nondasd, 1 },
667 { "cache", &aac_cache, 2 },
668 { "dacmode", &dacmode, 1 },
669 { "sync_mode", &aac_sync_mode, 0 },
670 { "convert_sgl", &aac_convert_sgl, 1 },
671 { "hba_mode", &aac_hba_mode, 1 },
672 { "commit", &aac_commit, 1 },
673#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,8)) || defined(PCI_HAS_ENABLE_MSI) || defined(PCI_HAS_DISABLE_MSI))
674 { "msi", &aac_msi, 0 ),
675#endif
676#if (defined(__arm__) || defined(CONFIG_EXTERNAL) || (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
677 { "coalescethreshold", &coalescethreshold, 0 },
678#else
679 { "coalescethreshold", &coalescethreshold, 16 },
680#endif
681 { "update_interval", &update_interval, 30 * 60 },
682 { "check_interval", &check_interval, -1 },
683 { "check_reset", &aac_check_reset, 1 },
684#if (defined(AAC_EXTENDED_TIMEOUT))
685 { "extendedtimeout", &extendedtimeout, AAC_EXTENDED_TIMEOUT },
686#endif
687 { "expose_physicals", &expose_physicals, -1 },
688 { "expose_hidden_space", &expose_hidden_space, -1},
689#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX__))
690 { "aac_remove_devnodes", &aac_remove_devnodes, 0},
691#else
692 { "aac_remove_devnodes", &aac_remove_devnodes, 1},
693#endif
694 { "reset_devices", &aac_reset_devices, 1 },
695#endif
696 { "dd", &expose_physicals, 0 },
697#if (defined(CONFIG_SLES_KERNEL) || defined(CONFIG_SUSE_KERNEL))
698 { "dud", &dud, 0 },
699#endif
700 };
701
702 adbg_setup(dev,KERN_INFO,"aacraid_setup(\"%s\")\n", (str) ? str : "<null>");
703 if (str) while ((key = strsep(&str, ",; \t\r\n"))) {
704 if (!*key)
705 continue;
706 if ((strnicmp (key, "aacraid", 7) == 0)
707 && ((key[7] == '.') || (key[7] == '=')))
708 key += 8;
709 if (((value = strchr(key, ':')))
710 || ((value = strchr(key, '='))))
711 *value++ = '\0';
712 for (i = 0; i < (sizeof (options) / sizeof (options[0])); i++) {
713 if (strnicmp (key, options[i].option_name,
714 strlen(options[i].option_name)) == 0) {
715 *options[i].option_flag
716 = (value)
717 ? simple_strtoul(value, NULL, 0)
718 : options[i].option_value;
719 break;
720 }
721 }
722 }
723#if (defined(CONFIG_SLES_KERNEL) || defined(CONFIG_SUSE_KERNEL))
724 /* SuSE special */
725 if (dud)
726 expose_physicals = 0;
727#endif
728
729 return (1);
730}
731
732#endif
/**
 * aac_valid_context - validate a command/fib pair before completing it
 * @scsicmd: SCSI command the fib response belongs to
 * @fibptr:  fib being completed
 *
 * Called from fib completion callbacks. Returns 1 when the command and
 * its device still look valid; otherwise completes and frees the fib
 * itself and returns 0 so the caller must drop the response without
 * touching @scsicmd.
 *
 * The 0x6b6b... comparisons in the debug paths match the kernel's slab
 * POISON_FREE byte pattern, i.e. they detect a command structure that
 * was already freed.
 */
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
		struct fib *fibptr) {
	struct scsi_device *device;

	/* Command gone or its completion callback cleared: tear down the fib */
	if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
		dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT) || (0 && defined(BOOTCD)))
		/* Only emit the verbose printk when dprintk is compiled out */
		if (!nblank(dprintk(x)))
			printk(KERN_WARNING
			  "aac_valid_context: scsi command corrupt %p->scsi_done=%p\n",
			  scsicmd, (scsicmd &&
			  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL))
			  ? scsicmd->scsi_done
			  : (void (*)(struct scsi_cmnd*))(uintptr_t)-1LL);
		/* Mirror the report into every adapter's firmware print log */
		if (nblank(fwprintf(x))) {
			extern struct list_head aac_devices; /* in linit.c */
			struct aac_dev *aac;
			list_for_each_entry(aac, &aac_devices, entry) {
				fwprintf((aac, HBA_FLAGS_DBG_FW_PRINT_B,
				  "scsi command corrupt %p->scsi_done=%p",
				  scsicmd, (scsicmd &&
				  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL))
				  ? scsicmd->scsi_done
				  : (void (*)(struct scsi_cmnd*))(uintptr_t)-1LL));
			}
		}
#endif
		aac_fib_complete(fibptr);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
		aac_fib_free_tag(fibptr);
#else
		aac_fib_free(fibptr);
#endif
		return 0;
	}
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT) || (0 && defined(BOOTCD)))
	/* Kernel-version-dependent checks for a command in an invalid state
	 * or with a clobbered magic/list linkage */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
# define invalid_command_state(x) (((x)->state == SCSI_STATE_FINISHED) || !(x)->state)
#else
# define invalid_command_state(x) 0
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
# define invalid_command_magic(x) list_empty(&(x)->list)
#else
# define invalid_command_magic(x) (((x)->sc_magic != SCSI_CMND_MAGIC) && (x)->sc_magic)
#endif
	/* Poisoned command pointer, finished/zero state, or poisoned
	 * scsi_done: report and drop the fib as above */
	if (unlikely((scsicmd == (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) ||
	  invalid_command_state(scsicmd) ||
	  (scsicmd->scsi_done == (void (*)(struct scsi_cmnd*))(uintptr_t)0x6b6b6b6b6b6b6b6bLL))) {
		printk(KERN_WARNING
		  "aac_valid_context: scsi command corrupt %p->scsi_done=%p%s%s\n",
		  scsicmd, (scsicmd &&
		  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL))
			? scsicmd->scsi_done
			: (void (*)(struct scsi_cmnd*))(uintptr_t)-1LL,
		  (scsicmd &&
		  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) &&
		  invalid_command_state(scsicmd)) ? " state" : "",
		  (scsicmd &&
		  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) &&
		  invalid_command_magic(scsicmd))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
			? " list" : ""
#else
			? " magic" : ""
#endif
			);
		if (nblank(fwprintf(x))) {
			extern struct list_head aac_devices; /* in linit.c */
			struct aac_dev *aac;
			list_for_each_entry(aac, &aac_devices, entry) {
				fwprintf((aac, HBA_FLAGS_DBG_FW_PRINT_B,
				  "scsi command corrupt %p->scsi_done=%p%s%s",
				  scsicmd, (scsicmd &&
				  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL))
					? scsicmd->scsi_done
					: (void (*)(struct scsi_cmnd*))(uintptr_t)-1LL,
				  (scsicmd &&
				  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) &&
				  invalid_command_state(scsicmd))
					? " state" : "",
				  (scsicmd &&
				  (scsicmd != (struct scsi_cmnd*)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) &&
				  invalid_command_magic(scsicmd))
					? " magic" : ""));
			}
		}
		aac_fib_complete(fibptr);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
		aac_fib_free_tag(fibptr);
#else
		aac_fib_free(fibptr);
#endif
		return 0;
	}
/* NOTE(review): invalid_command_magic is intentionally(?) left defined here
 * while invalid_command_state is undefined - confirm no later redefinition */
#undef invalid_command_state
#endif
	/* Command is valid: ownership passes back toward the midlayer */
	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
	device = scsicmd->device;
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT) || (0 && defined(BOOTCD)))
	/* Device pointer reads as slab poison: device struct already freed */
	if (unlikely(device == (void *)(uintptr_t)0x6b6b6b6b6b6b6b6bLL)) {
		printk(KERN_WARNING
		  "aac_valid_context: scsi device corrupt device=DEALLOCATED\n");
		if (nblank(fwprintf(x))) {
			extern struct list_head aac_devices; /* in linit.c */
			struct aac_dev *aac;
			list_for_each_entry(aac, &aac_devices, entry) {
				fwprintf((aac, HBA_FLAGS_DBG_FW_PRINT_B,
				  "scsi device corrupt device=DEALLOCATED"));
			}
		}
		aac_fib_complete(fibptr);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
		aac_fib_free_tag(fibptr);
#else
		aac_fib_free(fibptr);
#endif
		return 0;
	}
#endif
	/* No device attached to the command: drop the fib */
	if (unlikely(!device)) {
		dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT) || (0 && defined(BOOTCD)))
		if (!nblank(dprintk(x)))
			printk(KERN_WARNING
			  "aac_valid_context: scsi device corrupt device=%p online=%d\n",
			  device, (!device) ? -1 : scsi_device_online(device));
		if (nblank(fwprintf(x))) {
			extern struct list_head aac_devices; /* in linit.c */
			struct aac_dev *aac;
			list_for_each_entry(aac, &aac_devices, entry) {
				fwprintf((aac, HBA_FLAGS_DBG_FW_PRINT_B,
				  "scsi device corrupt device=%p online=%d",
				  device, (!device)
				  ? -1 : scsi_device_online(device)));
			}
		}
#endif
		aac_fib_complete(fibptr);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
		aac_fib_free_tag(fibptr);
#else
		aac_fib_free(fibptr);
#endif
		return 0;
	}
	return 1;
}
881
882/**
883 * aac_get_config_status - check the adapter configuration
884 * @common: adapter to query
885 *
886 * Query config status, and commit the configuration if needed.
887 */
888int aac_get_config_status(struct aac_dev *dev, int commit_flag)
889{
890 int status = 0;
891 struct fib * fibptr;
892
893 if (!(fibptr = aac_fib_alloc(dev)))
894 return -ENOMEM;
895
896 aac_fib_init(fibptr);
897 {
898 struct aac_get_config_status *dinfo;
899 dinfo = (struct aac_get_config_status *) fib_data(fibptr);
900
901 dinfo->command = cpu_to_le32(VM_ContainerConfig);
902 dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
903 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
904 }
905
906 status = aac_fib_send(ContainerCommand,
907 fibptr,
908 sizeof (struct aac_get_config_status_resp),
909 FsaNormal,
910 1, 1,
911 NULL, NULL);
912 if (status < 0) {
913 aac_err(dev, "Driver Init: CT_GET_CONFIG_STATUS( ) failed-%d\n", status);
914#if (0 && defined(BOOTCD))
915 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
916 "aac_get_config_status: SendFIB failed."));
917#endif
918 } else {
919 struct aac_get_config_status_resp *reply
920 = (struct aac_get_config_status_resp *) fib_data(fibptr);
921 dprintk((KERN_WARNING
922 "aac_get_config_status: response=%d status=%d action=%d\n",
923 le32_to_cpu(reply->response),
924 le32_to_cpu(reply->status),
925 le32_to_cpu(reply->data.action)));
926#if (0 && defined(BOOTCD))
927 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
928 "aac_get_config_status: response=%d status=%d action=%d",
929 le32_to_cpu(reply->response),
930 le32_to_cpu(reply->status),
931 le32_to_cpu(reply->data.action)));
932#endif
933 if ((le32_to_cpu(reply->response) != ST_OK) ||
934 (le32_to_cpu(reply->status) != CT_OK) ||
935 (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
936 aac_err(dev, "Commit Configuration Not accepted\n");
937#if (0 && defined(BOOTCD))
938 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
939 "aac_get_config_status: Will not issue the Commit Configuration"));
940#endif
941 status = -EINVAL;
942 }
943 }
944 /* Do not set XferState to zero unless receives a response from F/W */
945 if (status >= 0)
946 aac_fib_complete(fibptr);
947
948 /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
949 if (status >= 0) {
950 if ((aac_commit == 1) || commit_flag) {
951 struct aac_commit_config * dinfo;
952 aac_fib_init(fibptr);
953 dinfo = (struct aac_commit_config *) fib_data(fibptr);
954
955 dinfo->command = cpu_to_le32(VM_ContainerConfig);
956 dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
957
958 status = aac_fib_send(ContainerCommand,
959 fibptr,
960 sizeof (struct aac_commit_config),
961 FsaNormal,
962 1, 1,
963 NULL, NULL);
964 /* Do not set XferState to zero unless
965 * receives a response from F/W */
966 if (status >= 0)
967 aac_fib_complete(fibptr);
968 else
969 aac_err(dev,"Driver Init: CT_COMMIT_CONFIG( ) failed-%d\n", status);
970
971 } else if (aac_commit == 0) {
972 aac_warn(dev,
973 "Foreign device configurations are being ignored\n");
974#if (0 && defined(BOOTCD))
975 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
976 "aac_get_config_status: Foreign device configurations are being ignored"));
977#endif
978 }
979 }
980 /* FIB should be freed only after getting the response from the F/W */
981 if (status != -ERESTARTSYS)
982 aac_fib_free(fibptr);
983 return status;
984}
985
/*
 * aac_expose_phy_device - patch INQUIRY data so a physical disk is exposed.
 *
 * Reads byte 0 of the INQUIRY response carried by @scsicmd; if the
 * peripheral-qualifier bit 0x20 is set and the device type is TYPE_DISK,
 * the 0x20 bit is cleared so the midlayer treats the device as connected.
 * The many preprocessor branches only differ in how the data buffer is
 * reached (kmap of the first sg element, VMware kmap helpers, flat
 * request_buffer, or scsi_sg_copy_* on >= 2.6.26 kernels).
 */
static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
	void *buf;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	struct scatterlist *sg = scsi_sglist(scsicmd);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	if (scsicmd->use_sg) {
#if (!defined(__VMKLNX__) && !defined(__VMKLNX30__))
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
#else
#if defined(__ESX5__)
		buf = phys_to_virt(sg_dma_address(sg));
#else
		buf = phys_to_virt(sg->dma_address);
#endif
#endif
	} else {
		buf = scsicmd->request_buffer;
	}
#else
#if (defined(HAS_SG_PAGE))
	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
#else
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
#endif

#endif
	/* Clear the qualifier bit for hidden TYPE_DISK devices */
	if(((*(char *)buf) & 0x20) && ((*(char *)buf) & 0x1f) == TYPE_DISK)
		(*(char *)buf) &= 0xdf;

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
#if (!defined(__VMKLNX__)&& !defined(__VMKLNX30__))
	if (scsicmd->use_sg) {
#if (((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,16)) && defined(ARCH_HAS_FLUSH_ANON_PAGE)) || defined(CONFIG_PARISC) || defined(CONFIG_COMMUNITY_KERNEL))
		flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
#endif
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	}
#endif
#else
#if (defined(ARCH_HAS_FLUSH_ANON_PAGE) || defined(CONFIG_COMMUNITY_KERNEL))
	flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
#endif
	kunmap_atomic(buf - sg->offset, KM_IRQ0);
#endif

#elif ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX__)) && !defined(__x86_64__))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4))
	void *pptr;
	vmk_verify_memory_for_io(scsicmd->request_bufferMA, 1);
	buf = phys_to_kmap(scsicmd->request_bufferMA, 1, &pptr);
	if(((*(char *)buf) & 0x20) && ((*(char *)buf) & 0x1f) == TYPE_DISK)
		(*(char *)buf) &= 0xdf;
	phys_to_kmapFree(buf, pptr);
#else
	vmk_verify_memory_for_io(scsicmd->request_bufferMA, 1);
	buf = vmk_phys_to_kmap(scsicmd->request_bufferMA, 1);
	if (((*(char *)buf) & 0x20) && ((*(char *)buf) & 0x1f) == TYPE_DISK)
		(*(char *)buf) &= 0xdf;
	vmk_phys_to_kmap_free(buf);
#endif
#else
	buf = scsicmd->request_buffer;
	if(((*(char *)buf) & 0x20) && ((*(char *)buf) & 0x1f) == TYPE_DISK)
		(*(char *)buf) &= 0xdf;
#endif
#else
	/* Modern kernels: bounce the first byte through a local copy */
	char inq_data;
	scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
	if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
		inq_data &= 0xdf;
		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
	}
#endif
}
1063
1064/**
1065 * aac_get_containers - list containers
1066 * @common: adapter to probe
1067 *
1068 * Make a list of all containers on this controller
1069 */
1070int aac_get_containers(struct aac_dev *dev)
1071{
1072 struct fsa_dev_info *fsa_dev_ptr;
1073 u32 index;
1074 int status = 0;
1075 struct fib * fibptr;
1076 struct aac_get_container_count *dinfo;
1077 struct aac_get_container_count_resp *dresp;
1078 int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
1079
1080 if (!(fibptr = aac_fib_alloc(dev)))
1081 return -ENOMEM;
1082
1083 aac_fib_init(fibptr);
1084 dinfo = (struct aac_get_container_count *) fib_data(fibptr);
1085 dinfo->command = cpu_to_le32(VM_ContainerConfig);
1086 dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
1087
1088 status = aac_fib_send(ContainerCommand,
1089 fibptr,
1090 sizeof (struct aac_get_container_count),
1091 FsaNormal,
1092 1, 1,
1093 NULL, NULL);
1094
1095 if (status >= 0) {
1096 dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
1097 maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
1098 if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
1099 AAC_OPTION_SUPPORTED_240_VOLUMES)
1100 maximum_num_containers = le32_to_cpu(dresp->MaxSimpleVolumes);
1101 aac_fib_complete(fibptr);
1102 }
1103
1104 if (status != -ERESTARTSYS) {
1105 aac_fib_free(fibptr);
1106 }
1107
1108 if (status < 0) {
1109 aac_err(dev, "Driver Init: VM_ContainerConfig ( ) failed - %d\n", status);
1110 return status;
1111 }
1112
1113 if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
1114 maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
1115 if ((dev->fsa_dev == NULL) || (dev->maximum_num_containers != maximum_num_containers))
1116 {
1117 fsa_dev_ptr = dev->fsa_dev;
1118
1119#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) && !defined(HAS_KZALLOC))
1120 dev->fsa_dev = kmalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
1121 GFP_KERNEL);
1122#else
1123 dev->fsa_dev = kzalloc(sizeof(*fsa_dev_ptr) * maximum_num_containers,
1124 GFP_KERNEL);
1125#endif
1126 if (fsa_dev_ptr) {
1127 kfree(fsa_dev_ptr);
1128 fsa_dev_ptr = NULL;
1129 }
1130
1131 if (!dev->fsa_dev){
1132 aac_err(dev, "fsa_dev memory allocation failed\n");
1133 return -ENOMEM;
1134 }
1135#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) && !defined(HAS_KZALLOC))
1136 memset(dev->fsa_dev, 0, sizeof(struct fsa_dev) * maximum_num_containers);
1137#endif
1138 dev->maximum_num_containers = maximum_num_containers;
1139 }
1140
1141 for (index = 0; index < dev->maximum_num_containers; index++) {
1142 dev->fsa_dev[index].devname[0] = '\0';
1143 dev->fsa_dev[index].valid = 0;
1144 status = aac_probe_container(dev, index);
1145
1146 if (status < 0) {
1147 aac_err(dev,"Driver Init: aac_get_containers: SendFIB failed - %d\n", status);
1148#if (0 && defined(BOOTCD))
1149 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
1150 "aac_get_containers: SendFIB failed."));
1151#endif
1152 break;
1153 }
1154 }
1155 return status;
1156}
1157#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
1158
/*
 * Debug-instrumented completion wrapper around scsicmd->scsi_done().
 * Rejects commands that look freed or corrupted (0x6b6b... is the slab
 * poison pattern) and, when AAC_DEBUG_INSTRUMENT_TIMING is enabled, logs
 * a timestamped record with LBA/length decoded from the CDB plus the
 * adapter queue depth.
 */
static void aac_io_done(struct scsi_cmnd * scsicmd)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)) /* suppress unused variable warning */
	unsigned long cpu_flags;
#endif

	if (unlikely((scsicmd == NULL) ||
	  (scsicmd == (void *)(uintptr_t)0x6b6b6b6b6b6b6b6bLL) ||
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT) || (0 && defined(BOOTCD)))
	  invalid_command_magic(scsicmd) ||
#endif
	  (scsicmd->scsi_done == (void (*)(struct scsi_cmnd*))NULL) ||
	  (scsicmd->scsi_done == (void (*)(struct scsi_cmnd*))(uintptr_t)0x6b6b6b6b6b6b6b6bLL))) {
		printk(KERN_WARNING "aac_io_done: scsicmd corrupted\n");
		return;
	}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
	/* Pre-2.4 kernels require completion under the global io_request_lock */
	spin_lock_irqsave(&io_request_lock, cpu_flags);
#endif
	scsicmd->scsi_done(scsicmd);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0))
	spin_unlock_irqrestore(&io_request_lock, cpu_flags);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING))
	{
		u64 lba;
		u32 count = 0;
		struct timeval now;
		struct aac_dev * dev;
		do_gettimeofday(&now);
		/* Decode LBA and block count from the CDB for the trace line */
		if ((scsicmd->cmnd[0] == WRITE_6) ||	/* 6 byte command */
		    (scsicmd->cmnd[0] == READ_6)) {
			lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
			    (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
			count = scsicmd->cmnd[4];
			/* SBC: a transfer length of 0 in a 6-byte CDB means 256 blocks */
			if (count == 0)
				count = 256;
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
		} else if ((scsicmd->cmnd[0] == WRITE_16) || /* 16 byte command */
		    (scsicmd->cmnd[0] == READ_16)) {
			lba = ((u64)scsicmd->cmnd[2] << 56)
			    | ((u64)scsicmd->cmnd[3] << 48)
			    | ((u64)scsicmd->cmnd[4] << 40)
			    | ((u64)scsicmd->cmnd[5] << 32)
			    | ((u64)scsicmd->cmnd[6] << 24)
			    | (scsicmd->cmnd[7] << 16)
			    | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
			count = (scsicmd->cmnd[10] << 24)
			    | (scsicmd->cmnd[11] << 16)
			    | (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
#endif
		} else if ((scsicmd->cmnd[0] == WRITE_12) /* 12 byte command */
		    || (scsicmd->cmnd[0] == READ_12)) {
			lba = ((u64)scsicmd->cmnd[2] << 24)
			    | (scsicmd->cmnd[3] << 16)
			    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			count = (scsicmd->cmnd[6] << 24)
			    | (scsicmd->cmnd[7] << 16)
			    | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		} else if ((scsicmd->cmnd[0] == WRITE_10) /* 10 byte command */
		    || (scsicmd->cmnd[0] == READ_10)) {
			lba = ((u64)scsicmd->cmnd[2] << 24)
			    | (scsicmd->cmnd[3] << 16)
			    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
		} else
			/* Non-R/W command: log the command pointer instead of an LBA */
			lba = (u64)(long)scsicmd;
		dev = (struct aac_dev *)scsicmd->device->host->hostdata;
		printk(((count)
		  ? KERN_DEBUG "%lu.%06lu d%lu %llu[%u]\n"
		  : KERN_DEBUG "%lu.%06lu d%lu 0x%llx\n"),
		  now.tv_sec % 100, now.tv_usec,
		  (unsigned long)(atomic_read(&dev->queues->queue[
#if (defined(INITFLAGS_APRE_SUPPORTED))
		    (dev->comm_interface == AAC_COMM_APRE)
		      ? ApreCmdQueue
		      : AdapNormCmdQueue
#else
		    AdapNormCmdQueue
#endif
		  ].numpending)), (unsigned long long)lba, count);
#if (defined(INITFLAGS_APRE_SUPPORTED))
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  ((count)
		  ? "%lu.%06lu d%lu %llu[%u]"
		  : "%lu.%06lu d%lu 0x%llx"),
		  now.tv_sec % 100, now.tv_usec,
		  (unsigned long)(dev->queues->queue[
		    (dev->comm_interface == AAC_COMM_APRE)
		      ? ApreCmdQueue
		      : AdapNormCmdQueue
		  ].numpending), (unsigned long long)lba, count));
#else
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  ((count)
		  ? "%lu.%06lu d%lu %llu[%u]"
		  : "%lu.%06lu d%lu 0x%llx"),
		  now.tv_sec % 100, now.tv_usec,
		  (unsigned long)(atomic_read(&dev->queues->queue[AdapNormCmdQueue].numpending)), lba, count));

#endif
	}
#endif
}
1263
/*
 * Lightweight variant of aac_io_done(): completes the command and, under
 * the debug instrumentation builds, BUG()s on slab-poisoned pointers
 * (0x6b6b...) and emits a timestamped trace of the completion.
 */
static inline void __aac_io_done(struct scsi_cmnd * scsicmd)
{
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING))
	struct timeval now;
	struct aac_dev * dev;
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	BUG_ON((scsicmd == NULL) || (scsicmd == (void *)(uintptr_t)0x6b6b6b6b6b6b6b6bLL));
	BUG_ON((scsicmd->scsi_done == NULL) || (scsicmd->scsi_done == (void (*)(struct scsi_cmnd*))(uintptr_t)0x6b6b6b6b6b6b6b6bLL));
#endif
	scsicmd->scsi_done(scsicmd);
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING))
	do_gettimeofday(&now);
	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	printk(KERN_DEBUG "%lu.%06lu d%lu %p\n",
	  now.tv_sec % 100, now.tv_usec,
	  (unsigned long)(atomic_read(&dev->queues->queue[
#if (defined(INITFLAGS_APRE_SUPPORTED))
	    (dev->comm_interface == AAC_COMM_APRE)
	      ? ApreCmdQueue
	      : AdapNormCmdQueue
#else
	    AdapNormCmdQueue
#endif
	  ].numpending)), scsicmd);
#if (defined(INITFLAGS_APRE_SUPPORTED))
	fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
	  "%lu.%06lu d%lu %p",
	  now.tv_sec % 100, now.tv_usec,
	  (unsigned long)(dev->queues->queue[
	    (dev->comm_interface == AAC_COMM_APRE)
	      ? ApreCmdQueue
	      : AdapNormCmdQueue
	  ].numpending), scsicmd));
#else
	fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
	  "%lu.%06lu d%lu %p",
	  now.tv_sec % 100, now.tv_usec,
	  (unsigned long)atomic_read(&dev->queues->queue[AdapNormCmdQueue].numpending), scsicmd));
#endif
#endif
}
1306#endif
1307
1308#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
/*
 * Copy @len bytes of @data into the command's data buffer starting at
 * @offset.  Pre-2.6.26 stand-in for scsi_sg_copy_from_buffer(); note that
 * for scatter-gather commands only the FIRST sg element is mapped, so the
 * copy is bounded by (and silently truncated to) that element's length.
 */
static void aac_internal_transfer(struct scsi_cmnd *scsicmd, void *data, unsigned int offset, unsigned int len)
{
	void *buf;
	int transfer_len;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	struct scatterlist *sg = scsi_sglist(scsicmd);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	if (scsicmd->use_sg) {
#if (!defined(__VMKLNX30__) && !defined(__VMKLNX__))
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		transfer_len = min(sg->length, len + offset);
#else
#if defined(__ESX5__)
		buf = phys_to_virt(sg_dma_address(sg));
		transfer_len = min(sg_dma_len(sg), len + offset);
#else
		buf = phys_to_virt(sg->dma_address);
		transfer_len = min(sg->dma_length, len + offset);
#endif
#endif
	} else {
		buf = scsicmd->request_buffer;
		transfer_len = min(scsicmd->request_bufflen, len + offset);
	}
#else
#if (defined(HAS_SG_PAGE))
	buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
#else
	buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
#endif
	transfer_len = min(sg->length, len + offset);

#endif
	transfer_len -= offset;
	if (buf && transfer_len > 0)
		memcpy(buf + offset, data, transfer_len);

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
#if (!defined(__VMKLNX30__) && !defined(__VMKLNX__))
	if (scsicmd->use_sg) {
#if (((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,16)) && defined(ARCH_HAS_FLUSH_ANON_PAGE)) || defined(CONFIG_PARISC) || defined(CONFIG_COMMUNITY_KERNEL))
		/* Keep D-cache coherent on architectures that need it */
		flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
#endif
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
	}
#endif
#else
#if (defined(ARCH_HAS_FLUSH_ANON_PAGE) || defined(CONFIG_COMMUNITY_KERNEL))
	flush_kernel_dcache_page(kmap_atomic_to_page(buf - sg->offset));
#endif
	kunmap_atomic(buf - sg->offset, KM_IRQ0);
#endif

#elif ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__)) && !defined(__x86_64__))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4))
	void *pptr;
	vmk_verify_memory_for_io(scsicmd->request_bufferMA, len + offset);
	buf = phys_to_kmap(scsicmd->request_bufferMA, len + offset, &pptr);
	transfer_len = min(scsicmd->request_bufflen, len + offset) - offset;
	if (buf && transfer_len > 0)
		memcpy(buf + offset, data, transfer_len);
	phys_to_kmapFree(buf, pptr);
#else
	vmk_verify_memory_for_io(scsicmd->request_bufferMA, len + offset);
	buf = vmk_phys_to_kmap(scsicmd->request_bufferMA, len + offset);
	transfer_len = min(scsicmd->request_bufflen, len + offset) - offset;
	if (buf && transfer_len > 0)
		memcpy(buf + offset, data, transfer_len);
	vmk_phys_to_kmap_free(buf);
#endif
#else
	buf = scsicmd->request_buffer;
	transfer_len = min(scsicmd->request_bufflen, len + offset) - offset;
	if (buf && transfer_len > 0)
		memcpy(buf + offset, data, transfer_len);
#endif
}
1387
1388#endif
1389static void get_container_name_callback(void *context, struct fib * fibptr)
1390{
1391 struct aac_get_name_resp * get_name_reply;
1392 struct scsi_cmnd * scsicmd;
1393
1394 scsicmd = (struct scsi_cmnd *) context;
1395
1396 if (!aac_valid_context(scsicmd, fibptr))
1397 return;
1398
1399 dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
1400#if ((0 && defined(BOOTCD)) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1401 fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
1402 "get_container_name_callback"));
1403#endif
1404 BUG_ON(fibptr == NULL);
1405
1406 get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
1407 /* Failure is irrelevant, using default value instead */
1408#if ((0 && defined(BOOTCD)) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1409 fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
1410 " status=%d", le32_to_cpu(get_name_reply->status)));
1411#endif
1412 if ((le32_to_cpu(get_name_reply->status) == CT_OK)
1413 && (get_name_reply->data[0] != '\0')) {
1414 char *sp = get_name_reply->data;
1415 sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
1416#if ((0 && defined(BOOTCD)) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1417 fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
1418 " name=\"%s\"", sp));
1419#endif
1420 while (*sp == ' ')
1421 ++sp;
1422 if (*sp) {
1423#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26))
1424 struct inquiry_data inq;
1425#endif
1426 char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
1427 int count = sizeof(d);
1428 char *dp = d;
1429 do {
1430 *dp++ = (*sp) ? *sp++ : ' ';
1431 } while (--count > 0);
1432#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
1433 aac_internal_transfer(scsicmd, d,
1434 offsetof(struct inquiry_data, inqd_pid), sizeof(d));
1435#else
1436
1437 scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
1438 memcpy(inq.inqd_pid, d, sizeof(d));
1439 scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
1440#endif
1441 }
1442 }
1443
1444 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1445
1446 aac_fib_complete(fibptr);
1447
1448#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
1449 aac_fib_free_tag(fibptr);
1450#else
1451 aac_fib_free(fibptr);
1452#endif
1453
1454#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
1455 aac_io_done(scsicmd);
1456#else
1457 scsicmd->scsi_done(scsicmd);
1458#endif
1459}
1460
1461/**
1462 * aac_get_container_name - get container name, none blocking.
1463 */
1464static int aac_get_container_name(struct scsi_cmnd * scsicmd)
1465{
1466 int status;
1467 struct aac_get_name *dinfo;
1468 struct fib * cmd_fibcontext;
1469 struct aac_dev * dev;
1470
1471 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
1472
1473#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
1474 if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd)))
1475#else
1476 if (!(cmd_fibcontext = aac_fib_alloc(dev)))
1477#endif
1478 return -ENOMEM;
1479
1480 aac_fib_init(cmd_fibcontext);
1481 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
1482
1483 dinfo->command = cpu_to_le32(VM_ContainerConfig);
1484 dinfo->type = cpu_to_le32(CT_READ_NAME);
1485 dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
1486 dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
1487
1488 status = aac_fib_send(ContainerCommand,
1489 cmd_fibcontext,
1490 sizeof (struct aac_get_name_resp),
1491 FsaNormal,
1492 0, 1,
1493 (fib_callback)get_container_name_callback,
1494 (void *) scsicmd);
1495
1496 /*
1497 * Check that the command queued to the controller
1498 */
1499 if (status == -EINPROGRESS) {
1500 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1501#if ((0 && defined(BOOTCD)) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1502 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
1503 "aac_get_container_name(%p(%d:%d:%d:%d))", scsicmd,
1504 scsicmd->device->host->host_no, scmd_channel(scsicmd),
1505 scmd_id(scsicmd), scsicmd->device->lun));
1506#endif
1507 return 0;
1508 }
1509
1510 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
1511#if ((0 && defined(BOOTCD)) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1512 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
1513 "aac_get_container_name: aac_fib_send failed with status: %d.",
1514 status));
1515#endif
1516 aac_fib_complete(cmd_fibcontext);
1517#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
1518 aac_fib_free_tag(cmd_fibcontext);
1519#else
1520 aac_fib_free(cmd_fibcontext);
1521#endif
1522 return -1;
1523}
1524
1525static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
1526{
1527 struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
1528
1529#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1530 printk(KERN_INFO "aac_probe_container_callback2(%p)\n", scsicmd);
1531#endif
1532#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
1533#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1534 if (!list_empty(&((struct aac_dev *)(scsicmd->device->host->hostdata))->entry)) {
1535#endif
1536 fwprintf(((struct aac_dev *)(scsicmd->device->host->hostdata),
1537 HBA_FLAGS_DBG_FW_PRINT_B, "aac_probe_container_callback2(%p)", scsicmd));
1538#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1539 }
1540#endif
1541#endif
1542 if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
1543 return aac_scsi_cmd(scsicmd);
1544
1545 scsicmd->result = DID_NO_CONNECT << 16;
1546#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
1547 __aac_io_done(scsicmd);
1548#else
1549 scsicmd->scsi_done(scsicmd);
1550#endif
1551 return 0;
1552}
1553
/*
 * Completion for the container probe (VM_NameServe / VM_NameServe64 /
 * VM_NameServeAllBlk).  Decodes the aac_mount reply into the container's
 * fsa_dev slot (validity, block size, identifier, spin-up state, type,
 * 64-bit capacity, read-only flag), releases the FIB, then chains to the
 * continuation that _aac_probe_container() stashed in scsicmd->SCp.ptr.
 */
static void _aac_probe_container2(void * context, struct fib * fibptr)
{
	struct fsa_dev_info *fsa_dev_ptr;
	int (*callback)(struct scsi_cmnd *);
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
	struct aac_dev *dev;
#endif
	struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
	int i;

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	printk(KERN_INFO "_aac_probe_container2(%p,%p)\n", scsicmd, fibptr);
#endif

	if (!aac_valid_context(scsicmd, fibptr))
		return;

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
	dev = fibptr->dev;
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&dev->entry)) {
#endif
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "_aac_probe_container2(%p(%d:%d:%d:%d),%p)", scsicmd,
		  scsicmd->device->host->host_no, scmd_channel(scsicmd),
		  scmd_id(scsicmd), scsicmd->device->lun, fibptr));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	/* SCp.Status carries the firmware's container count to the caller chain */
	scsicmd->SCp.Status = 0;
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
	fsa_dev_ptr = dev->fsa_dev;
#else
	fsa_dev_ptr = fibptr->dev->fsa_dev;
#endif
	if (fsa_dev_ptr) {
		struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
		fsa_dev_ptr += scmd_id(scsicmd);

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		printk(KERN_INFO "_aac_probe_container2 (%d:%d:%d:%d)"
		  " resp={%d,%d,0x%x,%llu}\n",
		  scsicmd->device->host->host_no, scmd_channel(scsicmd),
		  scmd_id(scsicmd), scsicmd->device->lun,
		  le32_to_cpu(dresp->status), le32_to_cpu(dresp->mnt[0].vol),
		  le32_to_cpu(dresp->mnt[0].state),
		  ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
		    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32));
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		if (!list_empty(&dev->entry)) {
#endif
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "_aac_probe_container2 (%d:%d:%d:%d)"
			  " resp={%d,%d,0x%x,%llu}\n",
			  scsicmd->device->host->host_no, scmd_channel(scsicmd),
			  scmd_id(scsicmd), scsicmd->device->lun,
			  le32_to_cpu(dresp->status), le32_to_cpu(dresp->mnt[0].vol),
			  le32_to_cpu(dresp->mnt[0].state),
			  ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32)));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		}
#endif
#endif
		if ((le32_to_cpu(dresp->status) == ST_OK) &&
		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
			/* Without variable block size support, assume 512-byte sectors */
			if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
			    AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
				dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
				fsa_dev_ptr->block_size = 0x200;
			} else {
				fsa_dev_ptr->block_size = le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
			}
			for (i = 0; i < 16; i++)
				fsa_dev_ptr->identifier[i] = dresp->mnt[0].fileinfo.bdevinfo.identifier[i];
			fsa_dev_ptr->valid = 1;
			/* sense_key holds the current state of the spin-up */
			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
			/* Assemble the 64-bit capacity from the two 32-bit halves */
			fsa_dev_ptr->size
			  = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
			fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
		}
		if ((fsa_dev_ptr->valid & 1) == 0)
			fsa_dev_ptr->valid = 0;
		scsicmd->SCp.Status = le32_to_cpu(dresp->count);
	}
	aac_fib_complete(fibptr);
	aac_fib_free(fibptr);
	/* Continuation stashed by _aac_probe_container(); clear it before use */
	callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
	scsicmd->SCp.ptr = NULL;
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	printk(KERN_INFO "(*%p)(%p)\n", callback, scsicmd);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&dev->entry)) {
#endif
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B, "(*%p)(%p)", callback, scsicmd));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	(*callback)(scsicmd);
	return;
}
1668
/*
 * First-stage probe completion for the initial VM_NameServe request.
 * If the reply already identifies the container (or the request failed),
 * hand off directly to _aac_probe_container2().  Otherwise re-issue the
 * query as VM_NameServe64 (or VM_NameServeAllBlk on adapters with
 * variable block size support) to pick up 64-bit capacity information.
 *
 * NOTE(review): the FIB payload is examined before aac_valid_context();
 * on the early hand-off path the context is validated inside
 * _aac_probe_container2() instead.
 */
static void _aac_probe_container1(void * context, struct fib * fibptr)
{
	struct scsi_cmnd * scsicmd;
	struct aac_mount * dresp;
	struct aac_query_mount *dinfo;
	int status;

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	printk(KERN_INFO "_aac_probe_container1(%p,%p)\n", context, fibptr);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&fibptr->dev->entry)) {
#endif
		fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "_aac_probe_container1(%p,%p)", context, fibptr));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	dresp = (struct aac_mount *) fib_data(fibptr);
	/* Old firmware has no high capacity word; force it to zero */
	if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE))
		dresp->mnt[0].capacityhigh = 0;
	if ((le32_to_cpu(dresp->status) != ST_OK) ||
	    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
		_aac_probe_container2(context, fibptr);
		return;
	}
	scsicmd = (struct scsi_cmnd *) context;

	if (!aac_valid_context(scsicmd, fibptr))
		return;

	/* Reuse the same FIB for the follow-up 64-bit query */
	aac_fib_init(fibptr);

	dinfo = (struct aac_query_mount *)fib_data(fibptr);

	if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
		dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
	else
		dinfo->command = cpu_to_le32(VM_NameServe64);

	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
	dinfo->type = cpu_to_le32(FT_FILESYS);

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
		printk(KERN_INFO "aac_fib_send(ContainerCommand,VM_NameServeAllBlk,...)\n");
	else
		printk(KERN_INFO "aac_fib_send(ContainerCommand,VM_NameServe64,...)\n");
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&fibptr->dev->entry)) {
#endif
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "aac_fib_send(ContainerCommand,VM_NameServeAllBlk,...)"));
		else
			fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "aac_fib_send(ContainerCommand,VM_NameServe64,...)"));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	status = aac_fib_send(ContainerCommand,
		  fibptr,
		  sizeof(struct aac_query_mount),
		  FsaNormal,
		  0, 1,
		  _aac_probe_container2,
		  (void *) scsicmd);
	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS)
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
	else if (status < 0) {
		/* Inherit results from VM_NameServe, if any */
		dresp->status = cpu_to_le32(ST_OK);
		_aac_probe_container2(context, fibptr);
	}
}
1756
/*
 * _aac_probe_container - issue an asynchronous VM_NameServe query for
 * one container.
 * @scsicmd:	private scsi_cmnd carrying the probe context (target id
 *		selects the container)
 * @callback:	routine to run once probing completes; stashed in
 *		scsicmd->SCp.ptr for the completion path
 *
 * Sends a ContainerCommand fib (VM_NameServeAllBlk when the adapter
 * advertises variable block size support, plain VM_NameServe otherwise)
 * whose reply is handled by _aac_probe_container1.  Returns 0 once the
 * command is queued to the firmware; on failure the container's
 * in-progress "valid" state is cleared and @callback is invoked
 * synchronously.
 */
static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
{
	struct fib * fibptr;
	int status = -ENOMEM;

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	printk(KERN_INFO "_aac_probe_container(%p,%p)\n", scsicmd, callback);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&((struct aac_dev *)(scsicmd->device->host->hostdata))->entry)) {
#endif
	fwprintf(((struct aac_dev *)(scsicmd->device->host->hostdata),
	  HBA_FLAGS_DBG_FW_PRINT_B,
	  "_aac_probe_container(%p,%p)", scsicmd, callback));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
		struct aac_query_mount *dinfo;

		aac_fib_init(fibptr);

		dinfo = (struct aac_query_mount *)fib_data(fibptr);

		/* Variable-block-size firmware wants the AllBlk variant */
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
		else
			dinfo->command = cpu_to_le32(VM_NameServe);

		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
		dinfo->type = cpu_to_le32(FT_FILESYS);
		/* remember caller's callback for the completion handlers */
		scsicmd->SCp.ptr = (char *)callback;

#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			printk(KERN_INFO "aac_fib_send(ContainerCommand,VM_NameServeAllBlk,...)\n");
		else
			printk(KERN_INFO "aac_fib_send(ContainerCommand,VM_NameServe,...)\n");
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		if (!list_empty(&fibptr->dev->entry)) {
#endif
		if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
			fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "aac_fib_send(ContainerCommand,VM_NameServeAllBlk,...)"));
		else
			fwprintf((fibptr->dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "aac_fib_send(ContainerCommand,VM_NameServe,...)"));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
		}
#endif
#endif
		/* async send: reply handled by _aac_probe_container1 */
		status = aac_fib_send(ContainerCommand,
			  fibptr,
			  sizeof(struct aac_query_mount),
			  FsaNormal,
			  0, 1,
			  _aac_probe_container1,
			  (void *) scsicmd);
		/*
		 *	Check that the command queued to the controller
		 */
		if (status == -EINPROGRESS) {
			scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
			return 0;
		}
		if (status < 0) {
			/* send failed: drop the callback and release the fib */
			scsicmd->SCp.ptr = NULL;
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
	}
	if (status < 0) {
		struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
		if (fsa_dev_ptr) {
			fsa_dev_ptr += scmd_id(scsicmd);
			/* probe failed before completion: clear the pending
			 * "valid" state and tell the caller directly */
			if ((fsa_dev_ptr->valid & 1) == 0) {
				fsa_dev_ptr->valid = 0;
				return (*callback)(scsicmd);
			}
		}
	}
	return status;
}
1847
1848/**
1849 * aac_probe_container - query a logical volume
1850 * @dev: device to query
1851 * @cid: container identifier
1852 *
1853 * Queries the controller about the given volume. The volume information
1854 * is updated in the struct fsa_dev_info structure rather than returned.
1855 */
/*
 * aac_probe_container_callback1 - completion stub installed as
 * scsi_done on the private scsi_cmnd used by aac_probe_container().
 * Clears scsicmd->device (the owner frees the command itself) and
 * returns 0.
 */
static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
{
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	printk(KERN_INFO "aac_probe_container_callback1(%p)\n", scsicmd);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	if (!list_empty(&((struct aac_dev *)(scsicmd->device->host->hostdata))->entry)) {
#endif
	fwprintf(((struct aac_dev *)(scsicmd->device->host->hostdata),
	  HBA_FLAGS_DBG_FW_PRINT_B, "aac_probe_container_callback1(%p)", scsicmd));
#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
	}
#endif
#endif
	scsicmd->device = NULL;
	return 0;
}
1874
1875int aac_probe_container(struct aac_dev *dev, int cid)
1876{
1877 struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
1878 struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
1879 struct aac_query_mount *dinfo;
1880 struct fib *fibptr;
1881 struct aac_mount *dresp;
1882 struct fsa_dev_info *fsa_dev_ptr;
1883 int i;
1884 int status = -ENOMEM;
1885
1886#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1887 printk(KERN_INFO "aac_probe_container(%p,%d)\n", dev, cid);
1888#endif
1889 if (!scsicmd || !scsidev) {
1890 kfree(scsicmd);
1891 kfree(scsidev);
1892#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1893 printk(KERN_INFO "aac_probe_container returns -ENOMEM\n");
1894#endif
1895 return -ENOMEM;
1896 }
1897#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
1898#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1899 if (!list_empty(&dev->entry)) {
1900#endif
1901 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
1902 "aac_probe_container(%p,%d)", dev, cid));
1903#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
1904 }
1905#endif
1906#endif
1907#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
1908 scsicmd->list.next = NULL;
1909#endif
1910#if (defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
1911#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
1912 scsicmd->state = SCSI_STATE_QUEUED;
1913#endif
1914#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) && defined(SCSI_CMND_MAGIC))
1915 scsicmd->sc_magic = 0;
1916#endif
1917#endif
1918 scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
1919
1920 scsicmd->device = scsidev;
1921#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6)) && (!defined(SCSI_HAS_SCSI_DEVICE_ONLINE)))
1922 scsidev->online = 1;
1923#else
1924 scsidev->sdev_state = 0;
1925#endif
1926 scsidev->id = cid;
1927 scsidev->host = dev->scsi_host_ptr;
1928
1929 fsa_dev_ptr = dev->fsa_dev;
1930
1931 fibptr = aac_fib_alloc(dev);
1932 if (NULL == fibptr )
1933 goto exit_probe;
1934 aac_fib_init(fibptr);
1935
1936 if (NULL == fsa_dev_ptr)
1937 goto fib_free;
1938 fsa_dev_ptr += scmd_id(scsicmd);
1939
1940 dinfo = (struct aac_query_mount *)fib_data(fibptr);
1941
1942 if (fibptr->dev->supplement_adapter_info.SupportedOptions2 &
1943 AAC_OPTION_VARIABLE_BLOCK_SIZE)
1944 dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
1945 else if((fibptr->dev->adapter_info.options & AAC_OPT_NEW_COMM) &&
1946 (fibptr->dev->adapter_info.options & AAC_OPTION_VARIABLE_BLOCK_SIZE))
1947 dinfo->command = cpu_to_le32(VM_NameServe64);
1948 else
1949 dinfo->command = cpu_to_le32(VM_NameServe);
1950
1951
1952 dinfo->count= cpu_to_le32(scmd_id(scsicmd));
1953 dinfo->type = cpu_to_le32(FT_FILESYS);
1954
1955 status = aac_fib_send(ContainerCommand,
1956 fibptr,
1957 sizeof(struct aac_query_mount),
1958 FsaNormal,
1959 1,1,
1960 (void *)NULL,
1961 (void *) scsicmd);
1962
1963 if (status == -EINPROGRESS) {
1964 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
1965 status = 0;
1966 goto ret;
1967 }
1968
1969 if (status < 0) {
1970 goto fib_free;
1971 }
1972
1973 dresp = (struct aac_mount*) fib_data(fibptr);
1974
1975 if ((le32_to_cpu(dresp->status) == ST_OK) &&
1976 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
1977 (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
1978 if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 &
1979 AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
1980 dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
1981 fsa_dev_ptr->block_size = 0x200;
1982 } else {
1983 fsa_dev_ptr->block_size = le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
1984 }
1985 for (i = 0; i < 16; i++)
1986 fsa_dev_ptr->identifier[i] = dresp->mnt[0].fileinfo.bdevinfo.identifier[i];
1987 fsa_dev_ptr->valid = 1;
1988 /* sense_key holds the current state of the spin-up */
1989 if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
1990 fsa_dev_ptr->sense_data.sense_key = NOT_READY;
1991 else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
1992 fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
1993 fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
1994 fsa_dev_ptr->size
1995 = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
1996 (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
1997 fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
1998 }
1999 if ((fsa_dev_ptr->valid & 1) == 0)
2000 fsa_dev_ptr->valid = 0;
2001 scsicmd->SCp.Status = le32_to_cpu(dresp->count);
2002
2003fib_free:
2004 aac_fib_complete(fibptr);
2005 aac_fib_free(fibptr);
2006 scsicmd->device = NULL;
2007
2008exit_probe:
2009 if (status >= 0)
2010 goto ret;
2011
2012 if ((NULL != fsa_dev_ptr) &&
2013 (fsa_dev_ptr->valid & 1) == 0){
2014 fsa_dev_ptr->valid = 0;
2015 return 0;
2016 }
2017
2018ret:
2019 kfree(scsidev);
2020 status = scsicmd->SCp.Status;
2021 kfree(scsicmd);
2022#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
2023 {
2024 struct fsa_dev_info * fsa_dev_ptr = &dev->fsa_dev[cid];
2025#endif
2026#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
2027
2028 printk(KERN_INFO
2029 "aac_probe_container returns %d"
2030 " *(&%p->fsa_dev[%d]=%p)={%d,%d,%llu,\"%.*s\"}\n",
2031 status, dev, cid, fsa_dev_ptr, fsa_dev_ptr->valid,
2032 fsa_dev_ptr->type, fsa_dev_ptr->size,
2033 (int)sizeof(fsa_dev_ptr->devname),
2034 fsa_dev_ptr->devname);
2035#endif
2036#if (defined(AAC_DEBUG_INSTRUMENT_INIT) || defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE) || (0 && defined(BOOTCD)))
2037#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
2038 if (!list_empty(&dev->entry)) {
2039#endif
2040 fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
2041 "aac_probe_container returns %d"
2042 " *(&%p->fsa_dev[%d]=%p)={%d,%d,%llu,\"%.*s\"}",
2043 status, dev, cid, fsa_dev_ptr, fsa_dev_ptr->valid,
2044 fsa_dev_ptr->type, fsa_dev_ptr->size,
2045 (int)sizeof(fsa_dev_ptr->devname),
2046 fsa_dev_ptr->devname));
2047#if (defined(AAC_DEBUG_INSTRUMENT_VM_NAMESERVE))
2048 }
2049#endif
2050 }
2051#endif
2052 return status;
2053}
2054
#if (defined(CONFIG_COMMUNITY_KERNEL))
/* Local Structure to set SCSI inquiry data strings.
 * Fields are fixed-width and space-padded (not NUL-terminated):
 * setinqstr() memsets the whole struct to ' ' and inqstrcpy() copies
 * characters without appending a terminator. */
struct scsi_inq {
	char vid[8];	/* Vendor ID */
	char pid[16];	/* Product ID */
	char prl[4];	/* Product Revision Level */
};

#endif
2064/**
2065 * InqStrCopy - string merge
2066 * @a: string to copy from
2067 * @b: string to copy to
2068 *
2069 * Copy a String from one location to another
2070 * without copying \0
2071 */
2072
2073static void inqstrcpy(char *a, char *b)
2074{
2075
2076 while (*a != (char)0)
2077 *b++ = *a++;
2078}
2079
/*
 * Printable names for firmware-reported container (volume) types.
 * Indexed by the volume type value; the trailing "Unknown" entry is
 * used as the catch-all for out-of-range values (see
 * get_container_type() below).
 */
static char *container_types[] = {
	"None",
	"Volume",
	"Mirror",
	"Stripe",
	"RAID5",
	"SSRW",
	"SSRO",
	"Morph",
	"Legacy",
	"RAID4",
	"RAID10",
	"RAID00",
	"V-MIRRORS",
	"PSEUDO R4",
	"RAID50",
	"RAID5D",
	"RAID5D0",
	"RAID1E",
	"RAID6",
	"RAID60",
	"Unknown"
};
2103#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
2104
2105char * get_container_type(unsigned tindex)
2106{
2107 if (tindex >= ARRAY_SIZE(container_types))
2108 tindex = ARRAY_SIZE(container_types) - 1;
2109 return container_types[tindex];
2110}
2111#endif
2112
/* Function: setinqstr
 *
 * Arguments: [1] pointer to void [1] int
 *
 * Purpose: Sets SCSI inquiry data strings for vendor, product
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
 */
2121
#if (defined(CONFIG_COMMUNITY_KERNEL))
static void setinqstr(struct aac_dev *dev, void *data, int tindex)
#else
void setinqstr(struct aac_dev *dev, void *data, int tindex)
#endif
{
	struct scsi_inq *str;

	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
	/* blank-fill: scsi_inq fields are space padded, never NUL terminated */
	memset(str, ' ', sizeof(*str));

	if (dev->supplement_adapter_info.AdapterTypeText[0]) {
		int c;
		char *cp;
		char *cname = kmalloc(sizeof(dev->supplement_adapter_info.AdapterTypeText), GFP_KERNEL);

		/* NOTE(review): on allocation failure str is left entirely
		 * blank; no fallback to the driver ident strings */
		if (!cname)
			return;

		cp=cname;
		memcpy(cname, dev->supplement_adapter_info.AdapterTypeText,
		  sizeof(dev->supplement_adapter_info.AdapterTypeText));

		/* "AOC" prefixed adapters are rebranded as SMC */
		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
			inqstrcpy("SMC", str->vid);
		else {
			/* first whitespace-delimited word is the vendor id,
			 * clipped to the vid field width */
			c = sizeof(str->vid);
			while (*cp && *cp != ' ' && --c)
				++cp;
			c = *cp;
			*cp = '\0';	/* temporarily terminate the word */
			inqstrcpy (cname,
			  str->vid);
			*cp = c;	/* restore the clobbered character */
			while (*cp && *cp != ' ')
				++cp;
		}
		/* skip separating blanks to reach the product name */
		while (*cp == ' ')
			++cp;
		/* last six chars reserved for vol type */
		c = 0;
		/* clip the product name to the pid field width */
		if (strlen(cp) > sizeof(str->pid)) {
			c = cp[sizeof(str->pid)];
			cp[sizeof(str->pid)] = '\0';
		}
		inqstrcpy (cp, str->pid);
		kfree(cname);
	} else {
		/* no firmware-supplied name: fall back to the static
		 * ident table keyed by card type */
		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
		inqstrcpy (mp->vname, str->vid);
		/* last six chars reserved for vol type */
		inqstrcpy (mp->model, str->pid);
	}

	if (tindex < ARRAY_SIZE(container_types)){
		char *findit = str->pid;

		/* NOTE(review): this scan has no bound; it relies on the
		 * pid field containing at least one space (guaranteed by
		 * the blank memset above only while pid is not full) */
		for ( ; *findit != ' '; findit++); /* walk till we find a space */
		/* RAID is superfluous in the context of a RAID device */
		if (memcmp(findit-4, "RAID", 4) == 0)
			*(findit -= 4) = ' ';
		/* append the container type name if it fits in pid+prl */
		if (((findit - str->pid) + strlen(container_types[tindex]))
		 < (sizeof(str->pid) + sizeof(str->prl)))
			inqstrcpy (container_types[tindex], findit + 1);
	}
	inqstrcpy ("V1.0", str->prl);
}
2189
/*
 * get_container_serial_callback - fib completion handler for
 * CT_CID_TO_32BITS_UID (see aac_get_container_serial below).
 * Builds the response for an INQUIRY EVPD request: VPD page 0x83
 * (Device Identification: T10 vendor id + EUI-64 descriptors, plus an
 * FCPH name descriptor on SA firmware) or VPD page 0x80 (Unit Serial
 * Number).  Always completes the command with GOOD status; a firmware
 * failure simply leaves the caller's default data in place.
 */
static void get_container_serial_callback(void *context, struct fib * fibptr)
{
	struct aac_get_serial_resp * get_serial_reply;
	struct scsi_cmnd * scsicmd;

	BUG_ON(fibptr == NULL);

	scsicmd = (struct scsi_cmnd *) context;
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
	/* Failure is irrelevant, using default value instead */
	if (le32_to_cpu(get_serial_reply->status) == CT_OK) {

/* Excluding SUSE as it has issues when inbox driver does not have this support but outbox has it.
   Because SUSE uses /dev/disk/by-id mapping entries in the OS grub config and VPD 0X83 creates conflicts */
#if (!defined(CONFIG_SUSE_KERNEL))

		/*Check to see if it's for VPD 0x83 or 0x80 */
		if(scsicmd->cmnd[2] == 0x83)
		{
			/* vpd page 0x83 - Device Identification Page */
			struct aac_dev * dev;
			int container, i;
			TVPD_Page83 VPDPage83Data;

			dev = (struct aac_dev *)scsicmd->device->host->hostdata;

			memset(((u8 *)&VPDPage83Data), 0, sizeof(VPDPage83Data));


			VPDPage83Data.DeviceType = 0;		//DIRECT_ACCESS_DEVICE;
			VPDPage83Data.DeviceTypeQualifier = 0;	//DEVICE_CONNECTED;
			VPDPage83Data.PageCode = 0x83;		//VPD_DEVICE_IDENTIFIERS;
			VPDPage83Data.Reserved = 0;

			VPDPage83Data.PageLength = sizeof(VPDPage83Data.IdDescriptorType1) +
				sizeof(VPDPage83Data.IdDescriptorType2);

			// VPD 0x83 Type 3 is not supported for ARC
			if(dev->sa_firmware)
				VPDPage83Data.PageLength += sizeof(VPDPage83Data.IdDescriptorType3);

			// T10 Vendor Identifier Field Format
			VPDPage83Data.IdDescriptorType1.CodeSet = 2;		//VpdCodeSetAscii;
			VPDPage83Data.IdDescriptorType1.IdentifierType = 1;	//VpdIdentifierTypeVendorId;
			VPDPage83Data.IdDescriptorType1.IdentifierLength = sizeof(VPDPage83Data.IdDescriptorType1) - 4;

			memcpy(VPDPage83Data.IdDescriptorType1.VendId, "ADAPTEC ",	// "ADAPTEC " for adaptec
				sizeof(VPDPage83Data.IdDescriptorType1.VendId));
			memcpy(VPDPage83Data.IdDescriptorType1.ProductId, "ARRAY           ",
				sizeof(VPDPage83Data.IdDescriptorType1.ProductId));

			// Convert to ascii based serial number.
			// The LSB is the the end.
			/* NOTE(review): uid is shifted here without le32_to_cpu,
			 * while the VPD 0x80 path below byte-swaps it — verify
			 * behaviour on big-endian hosts */
			for (i=0; i < 8; i++) {
				u8 temp = (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
				if (temp  > 0x9)
				{
					VPDPage83Data.IdDescriptorType1.SerialNumber[i] = 'A' + (temp - 0xA);
				} else
				{
					VPDPage83Data.IdDescriptorType1.SerialNumber[i] = '0' + temp;
				}
			}

			// EUI-64 Vendor Identifier Field Format, 24 bits for VendId and 40 bits for SN.
			VPDPage83Data.IdDescriptorType2.CodeSet = 1;		//VpdCodeSetBinary;
			VPDPage83Data.IdDescriptorType2.IdentifierType = 2;	//VpdIdentifierTypeEUI64;
			VPDPage83Data.IdDescriptorType2.IdentifierLength = sizeof(VPDPage83Data.IdDescriptorType2) - 4;

			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0;	// 0x0000055 for IBM, 0x0000D0 for Adaptec.
			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0;
			VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0;

			VPDPage83Data.IdDescriptorType2.EU64Id.Serial = get_serial_reply->uid;
			VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0;


			// VPD 0x83 Type 3 is not supported for ARC
			if(dev->sa_firmware){
				// VpdIdentifierTypeFCPHName
				VPDPage83Data.IdDescriptorType3.CodeSet = 1;
				VPDPage83Data.IdDescriptorType3.IdentifierType = 3;
				VPDPage83Data.IdDescriptorType3.IdentifierLength = sizeof(VPDPage83Data.IdDescriptorType3) - 4;
				/* copy the 16-byte identifier recorded for this
				 * container by aac_probe_container() */
				for (container = 0; container < dev->maximum_num_containers; container++) {
					if (scmd_id(scsicmd) == container) {
						for (i = 0; i < 16; i++)
							VPDPage83Data.IdDescriptorType3.Identifier[i] = dev->fsa_dev[container].identifier[i];
						break;
					}
				}
			}

			// Move the inquiry data to the response buffer.
			// memcpy(arr, &VPDPage83Data, sizeof(VPDPage83Data));

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
			aac_internal_transfer(scsicmd, &VPDPage83Data, 0,
				sizeof(VPDPage83Data));
#else
			scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data,
				sizeof(VPDPage83Data));
#endif
		}
		else
#endif
		{
			/* It must be for VPD 0x80 */
			char sp[13];
			/* EVPD bit set */
			sp[0] = INQD_PDT_DA;
			sp[1] = scsicmd->cmnd[2];
			sp[2] = 0;
	#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,4))
			sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
				le32_to_cpu(get_serial_reply->uid));
	#else
			sp[3] = sprintf(sp+4, "%08X",
				le32_to_cpu(get_serial_reply->uid));
	#endif
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
			aac_internal_transfer(scsicmd, sp, 0, sizeof(sp));
	#else
			scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp));
	#endif
		}
	}

	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

	aac_fib_complete(fibptr);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	aac_fib_free_tag(fibptr);
#else
	aac_fib_free(fibptr);
#endif

#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	aac_io_done(scsicmd);
#else
	scsicmd->scsi_done(scsicmd);
#endif
}
2337
2338/**
2339 * aac_get_container_serial - get container serial, none blocking.
2340 */
/*
 * Queue a VM_ContainerConfig / CT_CID_TO_32BITS_UID request for the
 * container addressed by @scsicmd; the reply is consumed by
 * get_container_serial_callback().  Returns 0 once queued, -ENOMEM if
 * no fib is available, -1 if the send fails.
 */
static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
{
	int status;
	struct aac_get_serial *dinfo;
	struct fib * cmd_fibcontext;
	struct aac_dev * dev;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd)))
#else
	if (!(cmd_fibcontext = aac_fib_alloc(dev)))
#endif
		return -ENOMEM;

	aac_fib_init(cmd_fibcontext);
	dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);

	dinfo->command = cpu_to_le32(VM_ContainerConfig);
	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));

	/* size the fib for the response, which is larger than the request */
	status = aac_fib_send(ContainerCommand,
		  cmd_fibcontext,
		  sizeof (struct aac_get_serial_resp),
		  FsaNormal,
		  0, 1,
		  (fib_callback) get_container_serial_callback,
		  (void *) scsicmd);

	/*
	 *	Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	aac_fib_free_tag(cmd_fibcontext);
#else
	aac_fib_free(cmd_fibcontext);
#endif
	return -1;
}
2389
2390/* Function: setinqserial
2391 *
2392 * Arguments: [1] pointer to void [1] int
2393 *
2394 * Purpose: Sets SCSI Unit Serial number.
2395 * This is a fake. We should read a proper
2396 * serial number from the container. <SuSE>But
2397 * without docs it's quite hard to do it :-)
2398 * So this will have to do in the meantime.</SuSE>
2399 */
2400
/*
 * setinqserial - format a fake unit serial number ("%08X%02X": adapter
 * serial number followed by the container id) into @data.  Returns the
 * number of characters produced (excluding the NUL).
 */
static int setinqserial(struct aac_dev *dev, void *data, int cid)
{
	/*
	 * This breaks array migration.
	 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,4,4))
	/* 10 formatted chars always fit in the sizeof(scsi_inq)-4 bound */
	return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
		le32_to_cpu(dev->adapter_info.serial[0]), cid);
#else
	return sprintf((char *)(data), "%08X%02X",
		le32_to_cpu(dev->adapter_info.serial[0]), cid);
#endif
}
2414
2415static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
2416 u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
2417{
2418 u8 *sense_buf = (u8 *)sense_data;
2419 /* Sense data valid, err code 70h */
2420 sense_buf[0] = 0x70; /* No info field */
2421 sense_buf[1] = 0; /* Segment number, always zero */
2422
2423 sense_buf[2] = sense_key; /* Sense key */
2424
2425 sense_buf[12] = sense_code; /* Additional sense code */
2426 sense_buf[13] = a_sense_code; /* Additional sense code qualifier */
2427
2428 if (sense_key == ILLEGAL_REQUEST) {
2429 sense_buf[7] = 10; /* Additional sense length */
2430
2431 sense_buf[15] = bit_pointer;
2432 /* Illegal parameter is in the parameter block */
2433 if (sense_code == SENCODE_INVALID_CDB_FIELD)
2434 sense_buf[15] |= 0xc0;/* Std sense key specific field */
2435 /* Illegal parameter is in the CDB block */
2436 sense_buf[16] = field_pointer >> 8; /* MSB */
2437 sense_buf[17] = field_pointer; /* LSB */
2438 } else
2439 sense_buf[7] = 6; /* Additional sense length */
2440}
2441
/*
 * aac_bounds_32 - reject LBAs that a 32-bit addressing adapter cannot
 * reach.  If any bit above the low 32 is set, the command is failed
 * with a HARDWARE_ERROR check condition and completed immediately.
 * Returns 1 when the command was terminated here, 0 when in range.
 */
static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
	if (lba & 0xffffffff00000000LL) {
		int cid = scmd_id(cmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			     SCSI_SENSE_BUFFERSIZE));
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
		__aac_io_done(cmd);
#else
		cmd->scsi_done(cmd);
#endif
		return 1;
	}
	return 0;
}
2464
/*
 * aac_bounds_64 - bounds check for 64-bit capable adapters: any LBA
 * representable in the u64 argument is addressable, so never fails.
 */
static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
{
	return 0;
}
2469
2470static void io_callback(void *context, struct fib * fibptr);
2471
/*
 * aac_read_raw_io - build and send a raw-I/O read of @count blocks at
 * @lba.  Uses the RawIo2 request (IEEE-1212 sg elements) on TYPE2/TYPE3
 * comm interfaces when not in sync mode, the original RawIo request
 * otherwise.  byteCount scales @count by the container's block size.
 * Returns the aac_fib_send() status, or a negative sg-build error.
 */
static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && !dev->sync_mode) {
		struct aac_raw_io2 *readcmd2;
		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd2->byteCount = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
		ret = aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		/* fib size covers the header plus the extra sg elements */
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *readcmd;
		readcmd = (struct aac_raw_io *) fib_data(fib);
		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		readcmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);
		readcmd->cid = cpu_to_le16(scmd_id(cmd));
		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
		readcmd->bpTotal = 0;
		readcmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &readcmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
	}

	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2525
/*
 * aac_read_block64 - build and send a VM_CtHostRead64 read.  Unlike the
 * raw-I/O path, sector_count is in sectors (no block-size scaling) and
 * only the low 32 bits of @lba are carried in the request.
 * Returns the aac_fib_send() status, or a negative sg-build error.
 */
static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	u16 fibsize;
	struct aac_read64 *readcmd;
	long ret;

	aac_fib_init(fib);
	readcmd = (struct aac_read64 *) fib_data(fib);
	readcmd->command = cpu_to_le32(VM_CtHostRead64);
	readcmd->cid = cpu_to_le16(scmd_id(cmd));
	readcmd->sector_count = cpu_to_le16(count);
	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	readcmd->pad = 0;
	readcmd->flags = 0;

	ret = aac_build_sg64(cmd, &readcmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_read64) +
		((le32_to_cpu(readcmd->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2560
/*
 * aac_read_block - build and send a legacy VM_CtBlockRead read.  Count
 * is converted to bytes using the container's block size; only the low
 * 32 bits of @lba are used.  Returns the aac_fib_send() status, or a
 * negative sg-build error.
 */
static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
	u16 fibsize;
	struct aac_read *readcmd;
	struct aac_dev *dev = fib->dev;
	long ret;

	aac_fib_init(fib);
	readcmd = (struct aac_read *) fib_data(fib);
	readcmd->command = cpu_to_le32(VM_CtBlockRead);
	readcmd->cid = cpu_to_le32(scmd_id(cmd));
	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	readcmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);

	ret = aac_build_sg(cmd, &readcmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_read) +
			((le32_to_cpu(readcmd->sg.count) - 1) *
			 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2594
/*
 * aac_write_raw_io - build and send a raw-I/O write of @count blocks at
 * @lba.  Chooses RawIo2 vs RawIo exactly like aac_read_raw_io().  @fua
 * requests a forced-unit-access (SUREWRITE) write unless the aac_cache
 * module setting, or a battery-protected cache, says it may be ignored.
 * Returns the aac_fib_send() status, or a negative sg-build error.
 */
static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	struct aac_dev *dev = fib->dev;
	u16 fibsize, command;
	long ret;

	aac_fib_init(fib);
	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && !dev->sync_mode) {
		struct aac_raw_io2 *writecmd2;
		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd2->byteCount = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
		/* honour FUA unless aac_cache policy or protected cache
		 * allows it to be dropped */
		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
			cpu_to_le16(RIO2_IO_TYPE_WRITE);
		ret = aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
		if (ret < 0)
			return ret;
		command = ContainerRawIo2;
		fibsize = sizeof(struct aac_raw_io2) +
			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
	} else {
		struct aac_raw_io *writecmd;
		writecmd = (struct aac_raw_io *) fib_data(fib);
		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
		writecmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);
		writecmd->cid = cpu_to_le16(scmd_id(cmd));
#if (defined(RIO_SUREWRITE))
		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
			cpu_to_le16(RIO_TYPE_WRITE);
#else
		writecmd->flags = cpu_to_le16(RIO_TYPE_WRITE);
#endif
		writecmd->bpTotal = 0;
		writecmd->bpComplete = 0;
		ret = aac_build_sgraw(cmd, &writecmd->sg);
		if (ret < 0)
			return ret;
		command = ContainerRawIo;
		fibsize = sizeof(struct aac_raw_io) +
			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
	}

	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(command,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2658
/*
 * aac_write_block64 - build and send a VM_CtHostWrite64 write.  As with
 * aac_read_block64, sector_count is in sectors and only the low 32 bits
 * of @lba are carried; the @fua hint is not propagated on this path
 * (flags is always 0).  Returns the aac_fib_send() status, or a
 * negative sg-build error.
 */
static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write64 *writecmd;
	long ret;

	aac_fib_init(fib);
	writecmd = (struct aac_write64 *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtHostWrite64);
	writecmd->cid = cpu_to_le16(scmd_id(cmd));
	writecmd->sector_count = cpu_to_le16(count);
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	writecmd->pad	= 0;
	writecmd->flags	= 0;

	ret = aac_build_sg64(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_write64) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry64));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand64,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2693
/*
 * aac_write_block - build and send a legacy VM_CtBlockWrite write.
 * Count is converted to bytes using the container's block size; the
 * @fua hint is ignored on this path.  Returns the aac_fib_send()
 * status, or a negative sg-build error.
 */
static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
	u16 fibsize;
	struct aac_write *writecmd;
	struct aac_dev *dev = fib->dev;
	long ret;

	aac_fib_init(fib);
	writecmd = (struct aac_write *) fib_data(fib);
	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
	writecmd->cid = cpu_to_le32(scmd_id(cmd));
	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
	writecmd->count = cpu_to_le32(count * dev->fsa_dev[scmd_id(cmd)].block_size);
	/* provisional single-element sg count; aac_build_sg() rewrites it */
	writecmd->sg.count = cpu_to_le32(1);
	/* ->stable is not used - it did mean which type of write */

	ret = aac_build_sg(cmd, &writecmd->sg);
	if (ret < 0)
		return ret;
	fibsize = sizeof(struct aac_write) +
		((le32_to_cpu(writecmd->sg.count) - 1) *
		 sizeof (struct sgentry));
	BUG_ON (fibsize > (fib->dev->max_fib_size -
				sizeof(struct aac_fibhdr)));
	/*
	 *	Now send the Fib to the adapter
	 */
	return aac_fib_send(ContainerCommand,
			  fib,
			  fibsize,
			  FsaNormal,
			  0, 1,
			  (fib_callback) io_callback,
			  (void *) cmd);
}
2729
/*
 * aac_scsi_common - initialise the SRB fields common to all SCSI
 * pass-through commands: data-direction flags, physical addressing
 * (channel/id/lun), timeout in seconds (minimum 30) and CDB size.
 * Returns the srb ready for caller-specific CDB/sg fill-in.
 */
static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
{
	struct aac_srb * srbcmd;
	u32 flag;
	u32 timeout;

	aac_fib_init(fib);
	/* map the DMA direction onto the SRB transfer flags */
	switch(cmd->sc_data_direction){
	case DMA_TO_DEVICE:
		flag = SRB_DataOut;
		break;
	case DMA_BIDIRECTIONAL:
		flag = SRB_DataIn | SRB_DataOut;
		break;
	case DMA_FROM_DEVICE:
		flag = SRB_DataIn;
		break;
	case DMA_NONE:
	default:	/* shuts up some versions of gcc */
		flag = SRB_NoDataXfer;
		break;
	}

	srbcmd = (struct aac_srb*) fib_data(fib);
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
	srbcmd->id       = cpu_to_le32(scmd_id(cmd));
	srbcmd->lun      = cpu_to_le32(cmd->device->lun);
	srbcmd->flags    = cpu_to_le32(flag);
#if (1 && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27))
	timeout = cmd->timeout_per_command/HZ;
#else
	timeout = cmd->request->timeout/HZ;
#endif
	if (timeout == 0)
		timeout = 30;
	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
	srbcmd->retry_limit = 0; /* Obsolete parameter */
	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
	return srbcmd;
}
2771
2772static struct aac_hba_cmd_req * aac_construct_hbacmd(struct fib * fib, struct scsi_cmnd * cmd)
2773{
2774 struct aac_hba_cmd_req * hbacmd;
2775 struct aac_dev *dev;
2776 int bus, target;
2777 u64 address;
2778
2779 dev = (struct aac_dev *)cmd->device->host->hostdata;
2780
2781 hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
2782 memset(hbacmd, 0, 96); /* sizeof(*hbacmd) is not necessary */
2783 /* iu_type is a parameter of aac_hba_send */
2784 switch (cmd->sc_data_direction) {
2785 case DMA_TO_DEVICE:
2786 hbacmd->byte1 = 2;
2787 break;
2788 case DMA_FROM_DEVICE:
2789 case DMA_BIDIRECTIONAL:
2790 hbacmd->byte1 = 1;
2791 break;
2792 case DMA_NONE:
2793 default:
2794 break;
2795 }
2796 hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
2797
2798 bus = aac_logical_to_phys(scmd_channel(cmd));
2799 target = scmd_id(cmd);
2800 hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
2801
2802 /* we fill in reply_qid later in aac_src_deliver_message */
2803 /* we fill in iu_type, request_id later in aac_hba_send */
2804 /* we fill in emb_data_desc_count later in aac_build_sghba */
2805
2806 memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
2807 hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
2808
2809 address = (u64)fib->hw_error_pa;
2810 hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
2811 hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
2812 hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
2813
2814 return hbacmd;
2815}
2816
2817static void aac_srb_callback(void *context, struct fib * fibptr);
2818
2819static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
2820{
2821 u16 fibsize;
2822 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
2823 long ret;
2824
2825 ret = aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
2826 if (ret < 0)
2827 return ret;
2828 srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
2829
2830 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2831 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
2832 /*
2833 * Build Scatter/Gather list
2834 */
2835 fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
2836 ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
2837 sizeof (struct sgentry64));
2838 BUG_ON (fibsize > (fib->dev->max_fib_size -
2839 sizeof(struct aac_fibhdr)));
2840
2841 /*
2842 * Now send the Fib to the adapter
2843 */
2844 return aac_fib_send(ScsiPortCommand64, fib,
2845 fibsize, FsaNormal, 0, 1,
2846 (fib_callback) aac_srb_callback,
2847 (void *) cmd);
2848}
2849
2850static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
2851{
2852 u16 fibsize;
2853 struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
2854 long ret;
2855
2856 ret = aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
2857 if (ret < 0)
2858 return ret;
2859 srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
2860
2861 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2862 memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
2863 /*
2864 * Build Scatter/Gather list
2865 */
2866 fibsize = sizeof (struct aac_srb) +
2867 (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
2868 sizeof (struct sgentry));
2869 BUG_ON (fibsize > (fib->dev->max_fib_size -
2870 sizeof(struct aac_fibhdr)));
2871
2872 /*
2873 * Now send the Fib to the adapter
2874 */
2875 return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
2876 (fib_callback) aac_srb_callback, (void *) cmd);
2877}
2878
2879static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
2880{
2881 if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
2882 (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
2883 return FAILED;
2884 return aac_scsi_32(fib, cmd);
2885}
2886
2887void aac_hba_callback(void *context, struct fib * fibptr);
2888
2889static int aac_adapter_hba(struct fib * fib, struct scsi_cmnd * cmd)
2890{
2891 struct aac_hba_cmd_req * hbacmd = aac_construct_hbacmd(fib, cmd);
2892 struct aac_dev *dev;
2893 // u16 fibsize;
2894 long ret;
2895
2896 dev = (struct aac_dev *)cmd->device->host->hostdata;
2897
2898 ret = aac_build_sghba(cmd, hbacmd,
2899 dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
2900 if (ret < 0)
2901 return ret;
2902
2903 /*
2904 * Now send the HBA command to the adapter
2905 */
2906 fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
2907 sizeof(struct aac_hba_sgl);
2908
2909 return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
2910 (fib_callback) aac_hba_callback,
2911 (void *) cmd);
2912}
2913
2914int aac_issue_bmic_identify(struct aac_dev* dev, u32 bus, u32 target)
2915{
2916 struct fib* fibptr;
2917 int rcode = -1;
2918 u16 fibsize, datasize;
2919 struct aac_srb *srbcmd;
2920 struct sgmap64 *sg64;
2921 struct aac_ciss_identify_pd *identify_resp;
2922 dma_addr_t addr;
2923 u32 vbus, vid;
2924 u16 temp;
2925
2926 if (!(fibptr = aac_fib_alloc(dev)))
2927 return -ENOMEM;
2928
2929 temp = AAC_MAX_LUN + target;
2930
2931 fibsize = sizeof (struct aac_srb) -
2932 sizeof (struct sgentry) + sizeof (struct sgentry64);
2933 datasize = sizeof (struct aac_ciss_identify_pd);
2934
2935 identify_resp = (struct aac_ciss_identify_pd *)
2936 pci_alloc_consistent(dev->pdev, datasize, &addr);
2937
2938 if (identify_resp != NULL)
2939 {
2940 vbus = (u32)le16_to_cpu(
2941 dev->supplement_adapter_info.VirtDeviceBus);
2942 vid = (u32)le16_to_cpu(
2943 dev->supplement_adapter_info.VirtDeviceTarget);
2944
2945 aac_fib_init(fibptr);
2946 srbcmd = (struct aac_srb *) fib_data(fibptr);
2947
2948 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
2949 srbcmd->channel = cpu_to_le32(vbus);
2950 srbcmd->id = cpu_to_le32(vid);
2951 srbcmd->lun = 0;
2952 srbcmd->flags = cpu_to_le32(SRB_DataIn);
2953 srbcmd->timeout = cpu_to_le32(10);
2954 srbcmd->retry_limit = 0;
2955 srbcmd->cdb_size = cpu_to_le32(12);
2956 srbcmd->count = cpu_to_le32(datasize);
2957
2958 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
2959 srbcmd->cdb[0] = 0x26;
2960 srbcmd->cdb[2] = (u8)(temp & 0x00FF);
2961
2962 srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;
2963
2964 sg64 = (struct sgmap64 *)&srbcmd->sg;
2965 sg64->count = cpu_to_le32(1);
2966 sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
2967 sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
2968 sg64->sg[0].count = cpu_to_le32(datasize);
2969
2970 rcode = aac_fib_send(ScsiPortCommand64,
2971 fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);
2972
2973 if( identify_resp->current_queue_depth_limit <= 0 ||
2974 identify_resp->current_queue_depth_limit > 255)
2975 dev->hba_map[bus][target].qd_limit = 32;
2976 else
2977 dev->hba_map[bus][target].qd_limit = identify_resp->current_queue_depth_limit;
2978
2979 pci_free_consistent(dev->pdev, datasize, (void *)identify_resp, addr);
2980
2981 aac_fib_complete(fibptr);
2982 }
2983
2984 aac_fib_free(fibptr);
2985
2986 return rcode;
2987}
2988
/*
 * aac_get_adapter_info - interrogate the adapter at init / post-reset time.
 * @dev: adapter instance being configured
 *
 * Issues RequestAdapterInfo (and, when advertised, the supplemental-info,
 * bus-info and CISS_REPORT_PHYSICAL_LUNS requests) to the firmware, then
 * derives driver-wide settings from the answers: FIB sizes, the physical
 * device map (hba_map), non-DASD/JBOD/RAID-SCSI modes, 64-bit DMA (DAC)
 * configuration, and the per-interface scatter/gather and max_sectors
 * limits on the SCSI host.
 *
 * Returns 0/positive on success or a negative error code.  On
 * -ERESTARTSYS the FIB is deliberately not completed/freed because the
 * firmware may still write a response into it later.
 */
int aac_get_adapter_info(struct aac_dev* dev)
{
	struct fib* fibptr;
	int rcode, is_StreamLinedFIB;
	u32 tmp, i, bus, target;
	struct aac_adapter_info *info;
	struct aac_bus_info *command;
	struct aac_bus_info_response *bus_info;
	u16 fibsize, datasize;
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	struct aac_ciss_phys_luns_resp *phys_luns;
	dma_addr_t addr;

	/* NOTE(review): set here but never read in this function */
	is_StreamLinedFIB = 0;

	if (!(fibptr = aac_fib_alloc(dev)))
		return -ENOMEM;


	/* Step 1: basic adapter info (versions, option bits). */
	aac_fib_init(fibptr);
	info = (struct aac_adapter_info *) fib_data(fibptr);
	memset(info,0,sizeof(*info));

	dev->streamlined_fib_support = 0;
	rcode = aac_fib_send(RequestAdapterInfo,
			 fibptr,
			 sizeof(*info),
			 FsaNormal,
			 -1, 1, /* First `interrupt' command uses special wait */
			 NULL,
			 NULL);

	if (rcode < 0) {
		/* FIB should be freed only after
		 * getting the response from the F/W */
		if (rcode != -ERESTARTSYS) {
			aac_fib_complete(fibptr);
			aac_fib_free(fibptr);
		}
		aac_err(dev,"Driver Init: RequestAdapterInfo( ) failed - %d\n", rcode);
		return rcode;
	}
	memcpy(&dev->adapter_info, info, sizeof(*info));

	/* Step 2: supplemental info, when the firmware advertises it. */
	dev->supplement_adapter_info.VirtDeviceBus = 0xffff;
	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
		struct aac_supplement_adapter_info * sinfo;

		aac_fib_init(fibptr);

		sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);

		memset(sinfo,0,sizeof(*sinfo));

		rcode = aac_fib_send(RequestSupplementAdapterInfo,
				 fibptr,
				 sizeof(*sinfo),
				 FsaNormal,
				 1, 1,
				 NULL,
				 NULL);

		if (rcode >= 0)
			memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));

		if (rcode < 0) {
			if (rcode != -ERESTARTSYS) {
				aac_fib_complete(fibptr);
				aac_fib_free(fibptr);
			}
			aac_err(dev, "Driver Init: RequestSupplementAdapterInfo( ) failed - %d\n", rcode);
			return rcode;
		}
#if (defined(AAC_DEBUG_INSTRUMENT_SLOT))
		/*
		 * NOTE(review): the closing brace of this debug `if` sits
		 * below the #endif, so brace pairing of the enclosing block
		 * differs between the two preprocessor configurations —
		 * verify both still compile before touching this region.
		 */
		if ((le32_to_cpu(dev->supplement_adapter_info.Version)
		  < AAC_SIS_VERSION_V3) ||
		  (dev->supplement_adapter_info.SlotNumber
		  == cpu_to_le32(AAC_SIS_SLOT_UNKNOWN))) {
			dev->supplement_adapter_info.SlotNumber
			  = cpu_to_le32(PCI_SLOT(dev->pdev->devfn));
			(void)aac_adapter_sync_cmd(dev, SEND_SLOT_NUMBER,
			  PCI_SLOT(dev->pdev->devfn),
			  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
#endif
	}
	if (dev->supplement_adapter_info.FeatureBits & AAC_FEATURE_STREAMLINED_CONTINUATION)
		dev->streamlined_fib_support = 1;

	if (dev->adapter_info.options & AAC_OPT_2K_FIB_SUPPORT)
		dev->fib_size_supported = FIB_SIZE_2K;
	else
		dev->fib_size_supported = FIB_SIZE_STANDARD;

#if (defined(CODE_STREAM_IDENTIFIER) && !defined(CONFIG_COMMUNITY_KERNEL))
	/* Compare firmware and driver code-stream IDs; warn on mismatch. */
	if (dev->supplement_adapter_info.FeatureBits & AAC_FEATURE_FALCON) {
		char * finfo;

		aac_fib_init(fibptr);

		finfo = (char *) fib_data(fibptr);

		memset(finfo,0,MAX_CODE_STREAM_IDENTIFIER_LENGTH);

		rcode = aac_fib_send(RequestCompatibilityId,
				 fibptr,
				 MAX_CODE_STREAM_IDENTIFIER_LENGTH,
				 FsaNormal,
				 1, 1,
				 NULL,
				 NULL);

		if (rcode >= 0)
			memcpy(dev->code_stream_identifier, finfo,
				MAX_CODE_STREAM_IDENTIFIER_LENGTH);

		if (dev->code_stream_identifier[0]
		 && strncmp(CODE_STREAM_IDENTIFIER,
			dev->code_stream_identifier,
			MAX_CODE_STREAM_IDENTIFIER_LENGTH)) {
			extern char aac_driver_version[];
			printk(KERN_INFO
			  "%s%d: Warning ! ! ! Compatibility Mismatch\n",
			  dev->name, dev->id);
			tmp = le32_to_cpu(dev->adapter_info.kernelrev);
			printk(KERN_INFO
			  "%s%d: Firmware=%d.%d-%d[%d], Device Driver=%s\n",
			  dev->name, dev->id,
			  tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			  le32_to_cpu(dev->adapter_info.kernelbuild),
			  aac_driver_version);
			printk(KERN_INFO
			  "%s%d: These should be a tested set to avoid possible compatibility problems.\n",
			  dev->name, dev->id);
		}
	}
#endif

	/* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
	for (bus = 0; bus < AAC_MAX_BUSES; bus++)
		for (target = 0; target < AAC_MAX_TARGETS; target++) {
			dev->hba_map[bus][target].devtype = 0;
			dev->hba_map[bus][target].qd_limit = 0;
		}

	/*
	 * GetBusInfo
	 */
	aac_fib_init(fibptr);

	bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
	memset(bus_info, 0, sizeof(*bus_info));
	/* request and response share the FIB data area */
	command = (struct aac_bus_info *)bus_info;

	command->Command = cpu_to_le32(VM_Ioctl);
	command->ObjType = cpu_to_le32(FT_DRIVE);
	command->MethodId = cpu_to_le32(1);
	command->CtlCmd = cpu_to_le32(GetBusInfo);

	rcode = aac_fib_send(ContainerCommand,
			 fibptr,
			 sizeof (*bus_info),
			 FsaNormal,
			 1, 1,
			 NULL, NULL);

	/* reasoned default */
	dev->maximum_num_physicals = 16;
	if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
		dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
	}

	/* Step 3: on SA firmware, enumerate physical devices and classify
	 * each one (native raw / ARC raw / RAID member) into hba_map. */
	if (!dev->sync_mode && dev->sa_firmware &&
		dev->supplement_adapter_info.VirtDeviceBus != 0xffff) {
		/* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */
		fibsize = sizeof (struct aac_srb) -
			sizeof (struct sgentry) + sizeof (struct sgentry64);
		datasize = sizeof (struct aac_ciss_phys_luns_resp) +
			(AAC_MAX_NATIVE_TARGETS-1) * sizeof (struct _ciss_lun);

		phys_luns = (struct aac_ciss_phys_luns_resp *)
			pci_alloc_consistent(dev->pdev, datasize, &addr);
		if (phys_luns != NULL) {
			u32 vbus, vid;
			vbus = (u32)le16_to_cpu(
				dev->supplement_adapter_info.VirtDeviceBus);
			vid = (u32)le16_to_cpu(
				dev->supplement_adapter_info.VirtDeviceTarget);

			aac_fib_init(fibptr);
			srbcmd = (struct aac_srb *) fib_data(fibptr);

			srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
			srbcmd->channel = cpu_to_le32(vbus);
			srbcmd->id = cpu_to_le32(vid);
			srbcmd->lun = 0;
			srbcmd->flags = cpu_to_le32(SRB_DataIn);
			srbcmd->timeout = cpu_to_le32(10);
			srbcmd->retry_limit = 0;
			srbcmd->cdb_size = cpu_to_le32(12);
			srbcmd->count = cpu_to_le32(datasize);

			memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
			srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS;
			srbcmd->cdb[1] = 2; /* extended reporting */
			srbcmd->cdb[8] = (u8)(datasize>>8);
			srbcmd->cdb[9] = (u8)(datasize);

			sg64 = (struct sgmap64 *)&srbcmd->sg;
			sg64->count = cpu_to_le32(1);
			sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
			sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
			sg64->sg[0].count = cpu_to_le32(datasize);

			rcode = aac_fib_send(ScsiPortCommand64,
				fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);

			/* analyse data */
			if (rcode >= 0 && phys_luns->resp_flag == 2) {
				/* ok and extended reporting */
				u32 lun_count, nexus;
				u8 expose_flag, attribs;

				/* big-endian 32-bit byte length / 24-byte entries */
				lun_count = ((phys_luns->list_length[0]<<24) +
					(phys_luns->list_length[1]<<16) +
					(phys_luns->list_length[2]<<8) +
					(phys_luns->list_length[3])) / 24;
				for (i = 0; i < lun_count; ++i) {
					bus = phys_luns->lun[i].level2[1] & 0x3f;
					target = phys_luns->lun[i].level2[0];
					expose_flag = phys_luns->lun[i].bus >> 6;
					attribs = phys_luns->lun[i].node_ident[9];
					nexus = *((u32 *)&phys_luns->lun[i].node_ident[12]);
					if (bus < AAC_MAX_BUSES &&
						target < AAC_MAX_TARGETS) {
						dev->hba_map[bus][target].expose = expose_flag;
						if (expose_flag == 0) {
							if (nexus != 0 && (attribs & 8)) {
								dev->hba_map[bus][target].devtype =
									AAC_DEVTYPE_NATIVE_RAW;
								dev->hba_map[bus][target].rmw_nexus =
									nexus;
							} else {
								dev->hba_map[bus][target].devtype =
									AAC_DEVTYPE_ARC_RAW;
							}
						} else {
							dev->hba_map[bus][target].devtype =
								AAC_DEVTYPE_RAID_MEMBER;
						}
						if( dev->hba_map[bus][target].devtype == AAC_DEVTYPE_NATIVE_RAW )
							if( aac_issue_bmic_identify(dev, bus, target) < 0 )
								dev->hba_map[bus][target].qd_limit = 32;
					}
				}
			}
			pci_free_consistent(dev->pdev, datasize,
				(void *)phys_luns, addr);
		}
	}

	/* Step 4: one-time banner printing (skipped on controller reset). */
	if (!dev->in_reset) {
		char buffer[16];
		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
		printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
			dev->name,
			dev->id,
			tmp>>24,
			(tmp>>16)&0xff,
			tmp&0xff,
			le32_to_cpu(dev->adapter_info.kernelbuild),
			(int)sizeof(dev->supplement_adapter_info.BuildDate),
			dev->supplement_adapter_info.BuildDate);
#if (0 && defined(BOOTCD))
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "%s%d: kernel %d.%d-%d[%d] %.*s",
		  dev->name, dev->id,
		  tmp>>24, (tmp>>16)&0xff, tmp&0xff,
		  le32_to_cpu(dev->adapter_info.kernelbuild),
		  (int)sizeof(dev->supplement_adapter_info.BuildDate),
		  dev->supplement_adapter_info.BuildDate));
#endif
		tmp = le32_to_cpu(dev->adapter_info.monitorrev);
		printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.monitorbuild));
#if (0 && defined(BOOTCD))
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "%s%d: monitor %d.%d-%d[%d]",
		  dev->name, dev->id, tmp>>24,(tmp>>16)&0xff,tmp&0xff,
		  le32_to_cpu(dev->adapter_info.monitorbuild)));
#endif
		tmp = le32_to_cpu(dev->adapter_info.biosrev);
		printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
			dev->name, dev->id,
			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
			le32_to_cpu(dev->adapter_info.biosbuild));
#if (0 && defined(BOOTCD))
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "%s%d: bios %d.%d-%d[%d]",
		  dev->name, dev->id,
		  tmp>>24,(tmp>>16)&0xff,tmp&0xff,
		  le32_to_cpu(dev->adapter_info.biosbuild)));
#endif
		buffer[0] = '\0';
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
		if (aac_show_serial_number(
		  shost_to_class(dev->scsi_host_ptr), buffer))
#else
		if (aac_get_serial_number(
		  shost_to_class(dev->scsi_host_ptr), buffer))
#endif
#if (0 && defined(BOOTCD))
		{
#endif
			printk(KERN_INFO "%s%d: serial %s",
			  dev->name, dev->id, buffer);
#if (0 && defined(BOOTCD))
			if (nblank(fwprintf(x))) {
				char * cp = strchr(buffer, '\n');
				if (cp)
					*cp = '\0';
				fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
				  "%s%d: serial %s",
				  dev->name, dev->id, buffer));
			}
		}
#endif
		if (dev->supplement_adapter_info.VpdInfo.Tsid[0]) {
			printk(KERN_INFO "%s%d: TSID %.*s\n",
			  dev->name, dev->id,
			  (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
			  dev->supplement_adapter_info.VpdInfo.Tsid);
#if (0 && defined(BOOTCD))
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "%s%d: TSID %.*s",
			  dev->name, dev->id,
			  (int)sizeof(dev->supplement_adapter_info.VpdInfo.Tsid),
			  dev->supplement_adapter_info.VpdInfo.Tsid));
#endif
		}
		if (!aac_check_reset || ((aac_check_reset == 1) &&
		  (dev->supplement_adapter_info.SupportedOptions2 &
		  AAC_OPTION_IGNORE_RESET))) {
			printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
			  dev->name, dev->id);
#if (0 && defined(BOOTCD))
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "%s%d: Reset Adapter Ignored",
			  dev->name, dev->id));
#endif
		}
	}
	/* Step 5: pick up module options from the kernel command line on
	 * kernels without boot-config support. */
#if (!defined(CONFIG_COMMUNITY_KERNEL) && !defined(__VMKLNX30__) && !defined(__VMKLNX__) && ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || !defined(HAS_BOOT_CONFIG)))
#if (((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) && !defined(HAS_KZALLOC)) || !defined(MODULE))
	aacraid = kmalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
#else
	aacraid = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
#endif
	if (aacraid) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
#if (defined(MODULE))
		extern struct proc_dir_entry proc_root;
		struct proc_dir_entry * entry;

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)) && !defined(HAS_KZALLOC))
		memset(aacraid, 0, COMMAND_LINE_SIZE);
#endif
		/* walk /proc for the "cmdline" entry and read it directly */
		for (entry = proc_root.subdir;
		  entry != (struct proc_dir_entry *)NULL;
		  entry = entry->next) {
			adbg_setup(dev,KERN_INFO,"\"%.*s\"[%d]=%x ", entry->namelen,
			  entry->name, entry->namelen, entry->low_ino);
			if ((entry->low_ino != 0)
			 && (entry->namelen == 7)
			 && (memcmp ("cmdline", entry->name, 7) == 0)) {
				adbg_setup(dev,KERN_INFO,"%p->read_proc=%p ", entry, entry->read_proc);
				if (entry->read_proc != (int (*)(char *, char **, off_t, int, int *, void *))NULL) {
					char * start = aacraid;
					int eof;
					mm_segment_t fs;

					fs = get_fs();
					set_fs(get_ds());
					lock_kernel();
					entry->read_proc(aacraid, &start,
					  (off_t)0, COMMAND_LINE_SIZE-1, &eof,
					  NULL);
					unlock_kernel();
					set_fs(fs);
					adbg_setup(dev,KERN_INFO,
					  "cat /proc/cmdline -> \"%s\"\n",
					  aacraid);
#if (defined(AAC_DEBUG_INSTRUMENT_SETUP) || defined(BOOTCD))
					fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
					  "cat /proc/cmdline -> \"%s\"",
					  aacraid));
#endif
				}
				break;
			}
		}
#else
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20))
		extern char *saved_command_line;
#else
		extern char saved_command_line[];
#endif
		memcpy(aacraid, saved_command_line, COMMAND_LINE_SIZE);
#endif
#endif
	}
	if (aacraid && aacraid[0])
		aacraid_setup(dev,aacraid);
#endif
#if (defined(AAC_EXTENDED_TIMEOUT))
	adbg_setup( dev,KERN_INFO,"nondasd=%d dacmode=%d commit=%d "
	  "coalescethreshold=%d extendedtimeout=%d\n",
	  nondasd, dacmode, aac_commit, coalescethreshold,
	  extendedtimeout);
#else
	adbg_setup( dev,KERN_INFO,"nondasd=%d dacmode=%d commit=%d "
	  "coalescethreshold=%d\n",
	  nondasd, dacmode, aac_commit, coalescethreshold);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SETUP) || (0 && defined(BOOTCD)))
#if (defined(AAC_EXTENDED_TIMEOUT))
	fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
	  "nondasd=%d dacmode=%d commit=%d "
	  "coalescethreshold=%d extendedtimeout=%d",
	  nondasd, dacmode, aac_commit, coalescethreshold,
	  extendedtimeout));
#else
	fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
	  "nondasd=%d dacmode=%d commit=%d "
	  "coalescethreshold=%d",
	  nondasd, dacmode, aac_commit, coalescethreshold));
#endif
#endif

	/* Step 6: derive feature flags from options + module parameters. */
	dev->cache_protected = 0;
	dev->jbod = ((dev->supplement_adapter_info.FeatureBits &
		AAC_FEATURE_JBOD) != 0);
	dev->nondasd_support = 0;
	dev->raid_scsi_mode = 0;
	if(dev->adapter_info.options & AAC_OPT_NONDASD)
		dev->nondasd_support = 1;

	/*
	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
	 * force nondasd support on. If we decide to allow the non-dasd flag
	 * additional changes changes will have to be made to support
	 * RAID/SCSI. the function aac_scsi_cmd in this module will have to be
	 * changed to support the new dev->raid_scsi_mode flag instead of
	 * leaching off of the dev->nondasd_support flag. Also in linit.c the
	 * function aac_detect will have to be modified where it sets up the
	 * max number of channels based on the aac->nondasd_support flag only.
	 */
	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
		dev->nondasd_support = 1;
		dev->raid_scsi_mode = 1;
	}
	if (dev->raid_scsi_mode != 0)
		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
			dev->name, dev->id);
	if (nondasd != -1)
		dev->nondasd_support = (nondasd!=0);
	if (dev->nondasd_support && !dev->in_reset)
#if (0 && defined(BOOTCD))
	{
#endif
		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
#if (0 && defined(BOOTCD))
		fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
		  "%s%d: Non-DASD support enabled.",dev->name, dev->id));
	}
#endif

	/* Step 7: decide whether 64-bit DMA addressing (DAC) is needed. */
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,7)) && !defined(__VMKLNX__))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
	if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK)
#else
	if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
#endif
#else
	if (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT))
#endif
		dev->needs_dac = 1;
	dev->dac_support = 0;
#if (defined(CONFIG_COMMUNITY_KERNEL))
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,7))
	if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
		(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
#else
	if( (sizeof(dma_addr_t) > 4) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
#endif
		if (!dev->in_reset)
			printk(KERN_INFO "%s%d: 64bit support enabled.\n",
				dev->name, dev->id);
#else
	/*
	 * Only enable DAC mode if the dma_addr_t is larger than 32
	 * bit addressing, and we have more than 32 bit addressing worth of
	 * memory and if the controller supports 64 bit scatter gather elements.
	 */
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,7))
	if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
		(dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
#else
	if( (sizeof(dma_addr_t) > 4) && (num_physpages > (0xFFFFFFFFULL >> PAGE_SHIFT)) && (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)){
#endif
#endif
		dev->dac_support = 1;
	}

	if(dacmode != -1) {
		dev->dac_support = (dacmode!=0);
	}

	/* avoid problems with AAC_QUIRK_SCSI_32 controllers */
	if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks
		& AAC_QUIRK_SCSI_32)) {
		dev->nondasd_support = 0;
		dev->jbod = 0;
		expose_physicals = 0;
	}

	/* Step 8: commit the DMA mask; fall back to 32-bit on failure. */
	if(dev->dac_support != 0) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
		if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
#else
		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
#endif
			if (!dev->in_reset)
				printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
					dev->name, dev->id);
#if (0 && defined(BOOTCD))
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "%s%d: 64 Bit DAC enabled",
			  dev->name, dev->id));
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))
		} else if (!pci_set_dma_mask(dev->pdev, DMA_32BIT_MASK) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_32BIT_MASK)) {
#else
		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
#endif
			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
				dev->name, dev->id);
#if (0 && defined(BOOTCD))
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "%s%d: DMA mask set failed, 64 Bit DAC disabled",
			  dev->name, dev->id));
#endif
			dev->dac_support = 0;
		} else {
			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
				dev->name, dev->id);
#if (0 && defined(BOOTCD))
			fwprintf((dev, HBA_FLAGS_DBG_FW_PRINT_B,
			  "%s%d: No suitable DMA available.",
			  dev->name, dev->id));
#endif
			rcode = -ENOMEM;
		}
	}
	/*
	 * Deal with configuring for the individualized limits of each packet
	 * interface.
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && (!defined(__arm__)) && defined(CONFIG_HIGHMEM) && ((LINUX_VERSION_CODE != KERNEL_VERSION(2,4,19)) || defined(CONFIG_HIGHIO))
	dev->scsi_host_ptr->highmem_io = 1;
#endif
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	// WRONG PLACE FOR THIS CHECK TODAY? BUT WE WILL SURVIVE. MGS
	if (dev->comm_interface == AAC_COMM_APRE)
		dev->a_ops.adapter_scsi_cmd = aac_scsi_cmd_apre;
	else {
		dev->a_ops.adapter_scsi_cmd = aac_scsi_cmd;
#endif
	/* Pick the SRB path matching the DAC decision above. */
	dev->a_ops.adapter_scsi = (dev->dac_support)
	  ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
		? aac_scsi_32_64
		: aac_scsi_64)
	  : aac_scsi_32;
	if (dev->raw_io_interface) {
		dev->a_ops.adapter_bounds = (dev->raw_io_64)
			? aac_bounds_64
			: aac_bounds_32;
		dev->a_ops.adapter_read = aac_read_raw_io;
		dev->a_ops.adapter_write = aac_write_raw_io;
	} else {
		dev->a_ops.adapter_bounds = aac_bounds_32;
		/* s/g table limited by how many entries fit in one FIB */
		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
			sizeof(struct aac_fibhdr) -
			sizeof(struct aac_write) + sizeof(struct sgentry)) /
				sizeof(struct sgentry);
		if (dev->dac_support) {
			dev->a_ops.adapter_read = aac_read_block64;
			dev->a_ops.adapter_write = aac_write_block64;
			/*
			 * 38 scatter gather elements
			 */
			dev->scsi_host_ptr->sg_tablesize =
				(dev->max_fib_size -
				sizeof(struct aac_fibhdr) -
				sizeof(struct aac_write64) +
				sizeof(struct sgentry64)) /
					sizeof(struct sgentry64);
		} else {
			dev->a_ops.adapter_read = aac_read_block;
			dev->a_ops.adapter_write = aac_write_block;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && (!defined(__arm__)) && defined(CONFIG_HIGHMEM) && ((LINUX_VERSION_CODE != KERNEL_VERSION(2,4,19)) || defined(CONFIG_HIGHIO))
			dev->scsi_host_ptr->highmem_io = 0;
#endif
		}
		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
			/*
			 * Worst case size that could cause sg overflow when
			 * we break up SG elements that are larger than 64KB.
			 * Would be nice if we could tell the SCSI layer what
			 * the maximum SG element size can be. Worst case is
			 * (sg_tablesize-1) 4KB elements with one 64KB
			 * element.
			 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB
			 */
			dev->scsi_host_ptr->max_sectors =
			  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
		}
	}
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	}
#endif

	if (!dev->sync_mode && dev->sa_firmware &&
		dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
		dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
			HBA_MAX_SG_SEPARATE;

	/* FIB should be freed only after getting the response from the F/W */
	if (rcode != -ERESTARTSYS) {
		aac_fib_complete(fibptr);
		aac_fib_free(fibptr);
	}

	return rcode;
}
3644
3645
/**
 * io_callback	-	completion handler for container read/write FIBs
 * @context:	the struct scsi_cmnd the FIB was issued on behalf of
 * @fibptr:	the completed FIB
 *
 * Invoked when the adapter finishes a block read/write request.  Unmaps
 * the command's DMA buffers, translates the firmware reply status into a
 * SCSI result (filling in sense data on failure), releases the FIB and
 * completes the midlayer command.
 */
static void io_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_read_reply *readreply;
	struct scsi_cmnd *scsicmd;
	u32 cid;

	scsicmd = (struct scsi_cmnd *) context;

	/* Bail out if the command was already aborted/timed out. */
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	dev = fibptr->dev;
	cid = scmd_id(scsicmd);

	/* Debug builds only: re-decode the LBA from the CDB for tracing. */
	if (nblank(dprintk(x))) {
		u64 lba;
		switch (scsicmd->cmnd[0]) {
		case WRITE_6:
		case READ_6:
			lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
			    (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
			break;
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
		case WRITE_16:
		case READ_16:
			lba = ((u64)scsicmd->cmnd[2] << 56) |
			  ((u64)scsicmd->cmnd[3] << 48) |
			  ((u64)scsicmd->cmnd[4] << 40) |
			  ((u64)scsicmd->cmnd[5] << 32) |
			  ((u64)scsicmd->cmnd[6] << 24) |
			  (scsicmd->cmnd[7] << 16) |
			  (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
			break;
#endif
		case WRITE_12:
		case READ_12:
			lba = ((u64)scsicmd->cmnd[2] << 24) |
			  (scsicmd->cmnd[3] << 16) |
			  (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			break;
		default:
			/* READ_10/WRITE_10 and anything else: 4-byte LBA */
			lba = ((u64)scsicmd->cmnd[2] << 24) |
			  (scsicmd->cmnd[3] << 16) |
			  (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
			break;
		}
		printk(KERN_DEBUG
		  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
		  smp_processor_id(), (unsigned long long)lba, jiffies);
	}

	BUG_ON(fibptr == NULL);

	/* Tear down the DMA mappings set up when the command was issued. */
#if (!defined(__VMKLNX30__) || defined(__x86_64__))
	scsi_dma_unmap(scsicmd);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	if(!scsi_sg_count(scsicmd) && scsi_bufflen(scsicmd))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		pci_unmap_single(dev->pdev, (dma_addr_t)scsicmd->SCp.dma_handle,
#else
		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle,
#endif
				 scsicmd->request_bufflen,
				 scsicmd->sc_data_direction);
#endif

#endif
	/* Map the firmware completion status onto a SCSI result. */
	readreply = (struct aac_read_reply *)fib_data(fibptr);
	switch (le32_to_cpu(readreply->status)) {
	case ST_OK:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_GOOD;
		dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
		break;
	case ST_NOT_READY:
		/* Container exists but is not ready: CHECK CONDITION with
		 * NOT READY / becoming ready sense. */
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
		  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			  SCSI_SENSE_BUFFERSIZE));
		break;
	case ST_MEDERR:
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
		  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			  SCSI_SENSE_BUFFERSIZE));
		break;
	default:
		/* Any other firmware status is reported as an internal
		 * target (hardware) failure. */
#ifdef AAC_DETAILED_STATUS_INFO
		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
		  le32_to_cpu(readreply->status));
#endif
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			  SCSI_SENSE_BUFFERSIZE));
		break;
	}
	aac_fib_complete(fibptr);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	aac_fib_free_tag(fibptr);
#else
	aac_fib_free(fibptr);
#endif

#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	aac_io_done(scsicmd);
#else
	scsicmd->scsi_done(scsicmd);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
	/* 2.4 kernels only: if the device was blocked by the coalescing
	 * heuristic, unblock it once the number of still-active commands
	 * drops below the queue depth. */
	if (scsicmd->device->device_blocked) {
		struct scsi_cmnd * cmd;
		cid = 0;

		for (cmd = scsicmd->device->device_queue; cmd; cmd = cmd->next)
			if (cmd->serial_number)
				++cid;
		if (cid < scsicmd->device->queue_depth)
			scsicmd->device->device_blocked = 0;
	}
#endif
}
3780#if (!defined(CONFIG_COMMUNITY_KERNEL))
3781
/*
 * aac_select_queue_depth - sequential-I/O coalescing heuristic
 * @scsicmd:	the command about to be issued
 * @lba:	starting block of this request
 * @count:	number of blocks in this request
 *
 * When consecutive requests are back-to-back (this request starts where
 * the previous one ended), the queue depth is dropped to 2 so the block
 * layer gets a chance to merge subsequent sequential I/O; otherwise the
 * saved per-container depth is restored.
 *
 * NOTE(review): the "1 ||" in the first #if below makes this function
 * return immediately on every build, so the heuristic is currently
 * disabled; the remaining code is retained for reference.
 */
static inline void aac_select_queue_depth(
	struct scsi_cmnd * scsicmd,
	u64 lba,
	u32 count)
{
	struct scsi_device *device = scsicmd->device;
	struct aac_dev *dev;
	unsigned depth;
	int cid;

#if (1 || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
	/*-
		this routine is only used to try to back up
		sequential IO into linux's scsi_merge layer
		in an attempt to coalesce them. vmkernel
		doesn't do this, and in fact changing the
		queue_depth like this seems to tickle a bug
		in our scsi layer where we don't reschedule
		the IOs in a timely fashion.
		-gmccready@vmware.com
	 */
	return;
#endif
	/* Heuristic disabled via module parameter. */
	if (coalescethreshold == 0)
		return;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
	if (!device->tagged_supported)
		return;
#endif
	/* Leave the depth alone while error recovery is in progress. */
	if (
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)) || defined(SCSI_HAS_SHOST_STATE_ENUM))
		SHOST_RECOVERY == device->host->shost_state
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
		test_bit(SHOST_RECOVERY, &device->host->shost_state)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
		test_bit(SHOST_RECOVERY, &device->host->shost_state) ||
		device->host->eh_active
#else
		device->host->in_recovery || device->host->eh_active
#endif
	)
		return;
	dev = (struct aac_dev *)device->host->hostdata;
	cid = scmd_id(scsicmd);
	/* Remember the midlayer's depth so it can be restored later. */
	if (dev->fsa_dev[cid].queue_depth <= 2)
		dev->fsa_dev[cid].queue_depth = device->queue_depth;
	/* Sequential: this request starts where the last one ended. */
	if (lba == dev->fsa_dev[cid].last) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		struct scsi_cmnd * cmd;
#endif
		/*
		 * If larger than coalescethreshold in size, coalescing has
		 * less effect on overall performance. Also, if we are
		 * coalescing right now, leave it alone if above the threshold.
		 */
		if (count > coalescethreshold)
			return;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		/* 2.4: block the device when more than one other command
		 * is still active, to let requests pile up and merge. */
		depth = 0;

		for (cmd = device->device_queue; cmd; cmd = cmd->next)
			if ((cmd->serial_number)
			 && (cmd != scsicmd)
			 && (++depth > 1)) {
				device->device_blocked = 1;
				break;
			}
#endif
		depth = 2;
	} else {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		device->device_blocked = 0;
#endif
		depth = dev->fsa_dev[cid].queue_depth;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0))
	scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, depth);
#else
	scsi_change_queue_depth(device, depth);
#endif
#else
	device->queue_depth = depth;
#endif
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	dprintk((KERN_DEBUG "l=%llu %llu[%u] q=%u %lu\n",
	  dev->fsa_dev[cid].last, lba, count, device->queue_depth,
	  (unsigned long)(atomic_read(&dev->queues->queue[
	    (dev->comm_interface == AAC_COMM_APRE)
	      ? ApreCmdQueue
	      : AdapNormCmdQueue
	  ].numpending))));
#else
	dprintk((KERN_DEBUG "l=%llu %llu[%u] q=%u %lu\n",
	  dev->fsa_dev[cid].last, lba, count, device->queue_depth,
	  (unsigned long)(atomic_read(&dev->queues->queue[AdapNormCmdQueue].numpending))));
#endif
	/* Record where this request ends for the next sequentiality test. */
	dev->fsa_dev[cid].last = lba + count;
}
3881#endif
3882
/**
 * aac_read	-	service a SCSI READ(6/10/12/16) against a container
 * @scsicmd:	the midlayer command to execute
 *
 * Decodes the starting LBA and block count from the CDB, rejects requests
 * that reach into the firmware-hidden tail of the container (unless
 * expose_hidden_space is set), allocates a FIB and hands the request to
 * the adapter via aac_adapter_read().  Completion is reported through
 * io_callback().
 *
 * Returns 0 when the command was queued (or was completed immediately
 * with an error result), 1 when the request was rejected as out of range,
 * and -1 when no FIB could be allocated.
 */
static int aac_read(struct scsi_cmnd * scsicmd)
{
	u64 lba;
	u32 count;
	int status;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 * Get block address and transfer length
	 */
#if (defined(AAC_DEBUG_INSTRUMENT_IO))
	printk(KERN_DEBUG "aac_read: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
	  scsicmd->cmnd[0], scsicmd->cmnd[1], scsicmd->cmnd[2],
	  scsicmd->cmnd[3], scsicmd->cmnd[4], scsicmd->cmnd[5],
	  scsicmd->cmnd[6], scsicmd->cmnd[7], scsicmd->cmnd[8],
	  scsicmd->cmnd[9], scsicmd->cmnd[10], scsicmd->cmnd[11],
	  scsicmd->cmnd[12], scsicmd->cmnd[13], scsicmd->cmnd[14],
	  scsicmd->cmnd[15]);
#endif
#if 1 || defined(__powerpc__) || defined(__PPC__) || defined(__ppc__)
	/* Todo:
	 * Temporary fix to prevent EEH error on account of hotplug.
	 * Driver needs to read memory that it writes in case of error;
	 * this grants permission to read a write-only mapping.  This is
	 * a temporary fix until the patch that gives permission to write
	 * commands is accepted by kernel.org.
	 *
	 * Also used for DMAR access in RHEL 6.5 (Inspur)
	 */
	scsicmd->sc_data_direction = DMA_BIDIRECTIONAL;
#endif

	/* Decode LBA and transfer length from the CDB variant in use. */
	switch (scsicmd->cmnd[0]) {
	case READ_6:
		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
			(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];

		/* READ(6): a transfer length of 0 means 256 blocks. */
		if (count == 0)
			count = 256;
		break;
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(READ_16))
	case READ_16:
		dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 56) |
			((u64)scsicmd->cmnd[3] << 48) |
			((u64)scsicmd->cmnd[4] << 40) |
			((u64)scsicmd->cmnd[5] << 32) |
			((u64)scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		count = (scsicmd->cmnd[10] << 24) |
			(scsicmd->cmnd[11] << 16) |
			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
		break;
#endif
	case READ_12:
		dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) |
			(scsicmd->cmnd[3] << 16) |
			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		break;
	default:
		/* Treated as READ(10). */
		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) |
			(scsicmd->cmnd[3] << 16) |
			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
		break;
	}
/* ADPml11898 SUNMR Spitfire issue
 * FW layer exposes less container capacity than the actual one.
 * It exposes [Actual size - Spitfire space(10MB)] to the OS; I/Os to the
 * 10MB tail must be prohibited by the Linux driver.
 * Sense key is set to HARDWARE_ERROR and the notification is sent to the
 * MID layer.
 */

if(expose_hidden_space <= 0) {
	if((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
		int cid = scmd_id(scsicmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			  SCSI_SENSE_BUFFERSIZE));
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
		__aac_io_done(scsicmd);
#else
		scsicmd->scsi_done(scsicmd);
#endif
		return 1;
	}
}

	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
	  smp_processor_id(), (unsigned long long)lba, jiffies));
	if (aac_adapter_bounds(dev,scsicmd,lba))
		return 0;
#if (!defined(CONFIG_COMMUNITY_KERNEL))
	/*
	 * Are we in a sequential mode?
	 */
	aac_select_queue_depth(scsicmd, lba, count);
#endif
	/*
	 * Allocate and initialize a Fib
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd))) {
#else
	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
#endif
		return -1;
	}

	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);

	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
	/*
	 * For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	aac_io_done(scsicmd);
#else
	scsicmd->scsi_done(scsicmd);
#endif
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return 0;
}
4036
/**
 * aac_write	-	service a SCSI WRITE(6/10/12/16) against a container
 * @scsicmd:	the midlayer command to execute
 *
 * Decodes LBA, block count and the FUA bit from the CDB, rejects requests
 * that reach into the firmware-hidden tail of the container (unless
 * expose_hidden_space is set), allocates a FIB and hands the request to
 * the adapter via aac_adapter_write().  Completion is reported through
 * io_callback().
 *
 * Returns 0 when the command was queued (or was completed immediately
 * with an error result), 1 when the request was rejected as out of range,
 * and -1 when no FIB could be allocated.
 */
static int aac_write(struct scsi_cmnd * scsicmd)
{
	u64 lba;
	u32 count;
	int fua;
	int status;
	struct aac_dev *dev;
	struct fib * cmd_fibcontext;

	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
	/*
	 * Get block address and transfer length
	 */
#if (defined(AAC_DEBUG_INSTRUMENT_IO))
	printk(KERN_DEBUG "aac_write: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
	  scsicmd->cmnd[0], scsicmd->cmnd[1], scsicmd->cmnd[2],
	  scsicmd->cmnd[3], scsicmd->cmnd[4], scsicmd->cmnd[5],
	  scsicmd->cmnd[6], scsicmd->cmnd[7], scsicmd->cmnd[8],
	  scsicmd->cmnd[9], scsicmd->cmnd[10], scsicmd->cmnd[11],
	  scsicmd->cmnd[12], scsicmd->cmnd[13], scsicmd->cmnd[14],
	  scsicmd->cmnd[15]);
#endif
	if (scsicmd->cmnd[0] == WRITE_6)	/* 6 byte command */
	{
		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
		count = scsicmd->cmnd[4];
		/* WRITE(6): a transfer length of 0 means 256 blocks. */
		if (count == 0)
			count = 256;
		/* WRITE(6) has no FUA bit. */
		fua = 0;
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
	} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
		dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 56) |
			((u64)scsicmd->cmnd[3] << 48) |
			((u64)scsicmd->cmnd[4] << 40) |
			((u64)scsicmd->cmnd[5] << 32) |
			((u64)scsicmd->cmnd[6] << 24) |
			(scsicmd->cmnd[7] << 16) |
			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
		fua = scsicmd->cmnd[1] & 0x8;
#endif
	} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
		dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));

		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
		    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
		      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
		fua = scsicmd->cmnd[1] & 0x8;
	} else {
		/* Treated as WRITE(10). */
		dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
		fua = scsicmd->cmnd[1] & 0x8;
	}

/* ADPml11898 SUNMR Spitfire issue
 * FW layer exposes less container capacity than the actual one.
 * It exposes [Actual size - Spitfire space(10MB)] to the OS; I/Os to the
 * 10MB tail must be prohibited by the Linux driver.
 * Sense key is set to HARDWARE_ERROR and the notification is sent to the
 * MID layer.
 */
if(expose_hidden_space <= 0) {
	if((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
		int cid = scmd_id(scsicmd);
		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
			SAM_STAT_CHECK_CONDITION;
		set_sense(&dev->fsa_dev[cid].sense_data,
		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
		  min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
			  SCSI_SENSE_BUFFERSIZE));
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
		__aac_io_done(scsicmd);
#else
		scsicmd->scsi_done(scsicmd);
#endif
		return 1;
	}
}
	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
	  smp_processor_id(), (unsigned long long)lba, jiffies));
	if (aac_adapter_bounds(dev,scsicmd,lba))
		return 0;
#if (!defined(CONFIG_COMMUNITY_KERNEL))
	/*
	 * Are we in a sequential mode?
	 */
	aac_select_queue_depth(scsicmd, lba, count);
#endif
	/*
	 * Allocate and initialize a Fib then setup a BlockWrite command
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd))) {
#else
	if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
#endif
		return -1;
	}

	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);

	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
	/*
	 * For some reason, the Fib didn't queue, return QUEUE_FULL
	 */
	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	aac_io_done(scsicmd);
#else
	scsicmd->scsi_done(scsicmd);
#endif

	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return 0;
}
4167
4168static void synchronize_callback(void *context, struct fib *fibptr)
4169{
4170 struct aac_synchronize_reply *synchronizereply;
4171 struct scsi_cmnd *cmd;
4172
4173 cmd = context;
4174
4175 if (!aac_valid_context(cmd, fibptr))
4176 return;
4177
4178 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
4179 smp_processor_id(), jiffies));
4180 BUG_ON(fibptr == NULL);
4181
4182
4183 synchronizereply = fib_data(fibptr);
4184 if (le32_to_cpu(synchronizereply->status) == CT_OK)
4185 cmd->result = DID_OK << 16 |
4186 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4187 else {
4188 struct scsi_device *sdev = cmd->device;
4189 struct aac_dev *dev = fibptr->dev;
4190 u32 cid = sdev_id(sdev);
4191 printk(KERN_WARNING
4192 "synchronize_callback: synchronize failed, status = %d\n",
4193 le32_to_cpu(synchronizereply->status));
4194 cmd->result = DID_OK << 16 |
4195 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
4196 set_sense(&dev->fsa_dev[cid].sense_data,
4197 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
4198 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
4199 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
4200 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
4201 SCSI_SENSE_BUFFERSIZE));
4202 }
4203
4204 aac_fib_complete(fibptr);
4205 aac_fib_free(fibptr);
4206#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
4207 aac_io_done(cmd);
4208#else
4209 cmd->scsi_done(cmd);
4210#endif
4211}
4212
/**
 * aac_synchronize	-	service SYNCHRONIZE CACHE(10) for a container
 * @scsicmd:	the midlayer command to execute
 *
 * First scans the device's command list for any firmware-owned write that
 * overlaps the requested LBA range; if one is found the command is bounced
 * back with SCSI_MLQUEUE_DEVICE_BUSY so it is retried after the writes
 * drain.  Otherwise a CT_FLUSH_CACHE container-config FIB is sent to the
 * adapter; completion is reported through synchronize_callback().
 *
 * NOTE: the AAC_DEBUG_INSTRUMENT_SYNCHRONIZE variants below open and close
 * braces across preprocessor branches — edit with care.
 */
static int aac_synchronize(struct scsi_cmnd *scsicmd)
{
	int status;
	struct fib *cmd_fibcontext;
	struct aac_synchronize *synchronizecmd;
	struct scsi_cmnd *cmd;
	struct scsi_device *sdev = scsicmd->device;
	/* Count of in-flight writes that overlap the flush range. */
	int active = 0;
	struct aac_dev *aac;
	/* LBA/count decoded from the SYNCHRONIZE CACHE(10) CDB; count == 0
	 * means "whole device" per SBC. */
	u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
		(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
	u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
	unsigned long flags;

#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	printk(KERN_INFO "aac_synchronize(%p={.lba=%llu,.count=%lu})\n",
		scsicmd, (unsigned long long)lba, (unsigned long)count);
#endif
	/*
	 * Wait for all outstanding queued commands to complete to this
	 * specific target (block).
	 */
	spin_lock_irqsave(&sdev->list_lock, flags);
	list_for_each_entry(cmd, &sdev->cmd_list, list)
#else
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	printk(KERN_INFO "aac_synchronize(%p={.lba=%llu,.count=%lu})\n",
		scsicmd, (unsigned long long)lba, (unsigned long)count);
#endif
	for(cmd = sdev->device_queue; cmd; cmd = cmd->next)
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	{
		printk(KERN_INFO "%p={.SCp.phase=%x,.cmnd[0]=%u,",
			cmd, (unsigned)cmd->SCp.phase, cmd->cmnd[0]);
#endif
		if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
			u64 cmnd_lba;
			u32 cmnd_count;

			if (cmd->cmnd[0] == WRITE_6) {
				cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
					(cmd->cmnd[2] << 8) |
					cmd->cmnd[3];
				cmnd_count = cmd->cmnd[4];
				if (cmnd_count == 0)
					cmnd_count = 256;
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
			} else if (cmd->cmnd[0] == WRITE_16) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
					((u64)cmd->cmnd[3] << 48) |
					((u64)cmd->cmnd[4] << 40) |
					((u64)cmd->cmnd[5] << 32) |
					((u64)cmd->cmnd[6] << 24) |
					(cmd->cmnd[7] << 16) |
					(cmd->cmnd[8] << 8) |
					cmd->cmnd[9];
				cmnd_count = (cmd->cmnd[10] << 24) |
					(cmd->cmnd[11] << 16) |
					(cmd->cmnd[12] << 8) |
					cmd->cmnd[13];
#endif
			} else if (cmd->cmnd[0] == WRITE_12) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
					(cmd->cmnd[3] << 16) |
					(cmd->cmnd[4] << 8) |
					cmd->cmnd[5];
				cmnd_count = (cmd->cmnd[6] << 24) |
					(cmd->cmnd[7] << 16) |
					(cmd->cmnd[8] << 8) |
					cmd->cmnd[9];
			} else if (cmd->cmnd[0] == WRITE_10) {
				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
					(cmd->cmnd[3] << 16) |
					(cmd->cmnd[4] << 8) |
					cmd->cmnd[5];
				cmnd_count = (cmd->cmnd[7] << 8) |
					cmd->cmnd[8];
			} else
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
			{
				printk("}\n");
				continue;
			}
			printk(".lba=%llu,.count=%lu,",
				(unsigned long long)cmnd_lba,
				(unsigned long)cmnd_count);
#else
				continue;
#endif
			if (((cmnd_lba + cmnd_count) < lba) ||
			  (count && ((lba + count) < cmnd_lba)))
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
			{
				printk("}\n");
				continue;
			}
			printk(".active}\n");
#else
				continue;
#endif
			++active;
			break;
		}
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
		printk("}\n");
	}
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))

	spin_unlock_irqrestore(&sdev->list_lock, flags);
#endif

	/*
	 * Yield the processor (requeue for later)
	 */
	if (active)
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	{
		printk(KERN_INFO "aac_synchronize ACTIVE!\n");
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}
#else
		return SCSI_MLQUEUE_DEVICE_BUSY;
#endif

	/* No overlapping in-flight writes: send the flush to the adapter. */
	aac = (struct aac_dev *)sdev->host->hostdata;
	if (aac->in_reset)
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	{
		printk(KERN_INFO "aac_synchronize RESET!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}
#else
		return SCSI_MLQUEUE_HOST_BUSY;
#endif

#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	printk(KERN_INFO "aac_synchronize START\n");
#elif (defined(AAC_DEBUG_INSTRUMENT_IO))
	printk(KERN_DEBUG "aac_synchronize[cpu %d]: t = %ld.\n",
	  smp_processor_id(), jiffies);
#endif
	/*
	 * Allocate and initialize a Fib
	 */
	if (!(cmd_fibcontext = aac_fib_alloc(aac)))
#if (defined(AAC_DEBUG_INSTRUMENT_SYNCHRONIZE))
	{
		printk(KERN_INFO "aac_synchronize ALLOC!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}
#else
		return SCSI_MLQUEUE_HOST_BUSY;
#endif

	aac_fib_init(cmd_fibcontext);

	/* Build a container-config CT_FLUSH_CACHE request for this cid. */
	synchronizecmd = fib_data(cmd_fibcontext);
	synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
	synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
	synchronizecmd->count =
	  cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));

	/*
	 * Now send the Fib to the adapter
	 */
	status = aac_fib_send(ContainerCommand,
	  cmd_fibcontext,
	  sizeof(struct aac_synchronize),
	  FsaNormal,
	  0, 1,
	  (fib_callback)synchronize_callback,
	  (void *)scsicmd);

	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	printk(KERN_WARNING
		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return SCSI_MLQUEUE_HOST_BUSY;
}
4404
4405static void aac_start_stop_callback(void *context, struct fib *fibptr)
4406{
4407 struct scsi_cmnd *scsicmd = context;
4408
4409 if (!aac_valid_context(scsicmd, fibptr))
4410 return;
4411
4412 BUG_ON(fibptr == NULL);
4413
4414 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4415
4416 aac_fib_complete(fibptr);
4417 aac_fib_free(fibptr);
4418#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
4419 aac_io_done(scsicmd);
4420#else
4421 scsicmd->scsi_done(scsicmd);
4422#endif
4423}
4424
/**
 * aac_start_stop	-	service a SCSI START STOP UNIT command
 * @scsicmd:	the midlayer command to execute
 *
 * If the adapter does not advertise AAC_OPTION_POWER_MANAGEMENT the
 * command is completed immediately with GOOD status (a no-op).  Otherwise
 * a CT_POWER_MANAGEMENT container-config FIB is built from the CDB's
 * start and immediate bits and sent to the adapter; completion is
 * reported through aac_start_stop_callback().
 *
 * Returns 0 when handled/queued, SCSI_MLQUEUE_HOST_BUSY when the adapter
 * is resetting, no FIB is available, or the FIB could not be sent.
 */
static int aac_start_stop(struct scsi_cmnd *scsicmd)
{
	int status;
	struct fib *cmd_fibcontext;
	struct aac_power_management *pmcmd;
	struct scsi_device *sdev = scsicmd->device;
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;

	/* No firmware power management: succeed without doing anything. */
	if (!(aac->supplement_adapter_info.SupportedOptions2 &
	      AAC_OPTION_POWER_MANAGEMENT)) {
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
				  SAM_STAT_GOOD;
#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
		__aac_io_done(scsicmd);
#else
		scsicmd->scsi_done(scsicmd);
#endif
		return 0;
	}

	if (aac->in_reset)
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Allocate and initialize a Fib
	 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
#else
	cmd_fibcontext = aac_fib_alloc(aac);
#endif
	if (!cmd_fibcontext)
		return SCSI_MLQUEUE_HOST_BUSY;

	aac_fib_init(cmd_fibcontext);

	/* Build the power-management request from the CDB bits. */
	pmcmd = fib_data(cmd_fibcontext);
	pmcmd->command = cpu_to_le32(VM_ContainerConfig);
	pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
	/* Eject bit ignored, not relevant */
	pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
		cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;

	/*
	 * Now send the Fib to the adapter
	 */
	status = aac_fib_send(ContainerCommand,
	  cmd_fibcontext,
	  sizeof(struct aac_power_management),
	  FsaNormal,
	  0, 1,
	  (fib_callback)aac_start_stop_callback,
	  (void *)scsicmd);

	/*
	 * Check that the command queued to the controller
	 */
	if (status == -EINPROGRESS) {
		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
		return 0;
	}

	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);
	return SCSI_MLQUEUE_HOST_BUSY;
}
4494#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
4495
/*
 * get_sd_devname - render the classic "sdX"/"sdXY" disk name for a disk
 * index.  disknum 0..25 maps to "sda".."sdz"; 26 and up maps to the
 * two-letter scheme "sdaa", "sdab", ...  A negative index yields the
 * empty string.  The caller's buffer must hold at least 5 bytes.
 */
static inline void get_sd_devname(int disknum, char *buffer)
{
	char *p = buffer;

	if (disknum < 0) {
		*p = '\0';
		return;
	}

	*p++ = 's';
	*p++ = 'd';
	if (disknum >= 26) {
		/* Two-letter suffix: "sdaa" for 26, "sdab" for 27, ... */
		*p++ = 'a' - 1 + (disknum / 26);
		*p++ = 'a' + (disknum % 26);
	} else {
		/* Single-letter suffix: "sda" through "sdz". */
		*p++ = 'a' + disknum;
	}
	*p = '\0';
}
4518
4519# define strlcpy(s1,s2,n) strncpy(s1,s2,n);s1[n-1]='\0'
4520#endif
4521
4522/**
4523 * aac_scsi_cmd() - Process SCSI command
4524 * @scsicmd: SCSI command block
4525 *
4526 * Emulate a SCSI command and queue the required request for the
4527 * aacraid firmware.
4528 */
4529
4530#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
4531static
4532#endif
4533int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
4534{
4535 u32 cid, bus;
4536 struct Scsi_Host *host = scsicmd->device->host;
4537 struct aac_dev *dev = (struct aac_dev *)host->hostdata;
4538 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
4539
4540 if (fsa_dev_ptr == NULL)
4541 return -1;
4542#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
4543 printk(KERN_NOTICE "scsicmd->cmnd={%02x %02x %02x %02x %02x "
4544 "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x}\n",
4545 scsicmd->cmnd[0], scsicmd->cmnd[1], scsicmd->cmnd[2],
4546 scsicmd->cmnd[3], scsicmd->cmnd[4], scsicmd->cmnd[5],
4547 scsicmd->cmnd[6], scsicmd->cmnd[7], scsicmd->cmnd[8],
4548 scsicmd->cmnd[9], scsicmd->cmnd[10], scsicmd->cmnd[11],
4549 scsicmd->cmnd[12], scsicmd->cmnd[13], scsicmd->cmnd[14],
4550 scsicmd->cmnd[15]);
4551#endif
4552 /*
4553 * If the bus, id or lun is out of range, return fail
4554 * Test does not apply to ID 16, the pseudo id for the controller
4555 * itself.
4556 */
4557 cid = scmd_id(scsicmd);
4558 if (cid != host->this_id) {
4559 if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
4560 if((cid >= dev->maximum_num_containers) ||
4561 (scsicmd->device->lun != 0)) {
4562#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
4563 printk(KERN_INFO
4564 "scsicmd(0:%d:%d:0) No Connect\n",
4565 scmd_channel(scsicmd), cid);
4566#endif
4567 scsicmd->result = DID_NO_CONNECT << 16;
4568 goto scsi_done_ret;
4569 }
4570
4571 /*
4572 * If the target container doesn't exist, it may have
4573 * been newly created
4574 */
4575 if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
4576 (fsa_dev_ptr[cid].sense_data.sense_key ==
4577 NOT_READY)) {
4578 switch (scsicmd->cmnd[0]) {
4579#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(SERVICE_ACTION_IN))
4580 case SERVICE_ACTION_IN:
4581 if (!(dev->raw_io_interface) ||
4582 !(dev->raw_io_64) ||
4583 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
4584 break;
4585#endif
4586 case INQUIRY:
4587 case READ_CAPACITY:
4588 case TEST_UNIT_READY:
4589 if (dev->in_reset)
4590 return -1;
4591 return _aac_probe_container(scsicmd,
4592 aac_probe_container_callback2);
4593 default:
4594 break;
4595 }
4596 }
4597 } else { /* check for physical non-dasd devices */
4598#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
4599 printk(KERN_INFO "scsicmd(0:%d:%d:0) Phys\n",
4600 scmd_channel(scsicmd), cid);
4601#endif
4602#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
4603 /* ADPml05517 */
4604 /*
4605 * If this is a test unit ready and there is already
4606 * a long command outstanding, we will assume a
4607 * sequentially queued device and report back that
4608 * this needs a retry.
4609 */
4610 if (scsicmd->cmnd[0] == TEST_UNIT_READY) {
4611 struct scsi_cmnd * command;
4612#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
4613 unsigned long flags;
4614 spin_lock_irqsave(&scsicmd->device->list_lock,
4615 flags);
4616 list_for_each_entry(command,
4617 &scsicmd->device->cmd_list, list)
4618#else
4619 for(command = scsicmd->device->device_queue;
4620 command; command = command->next)
4621#endif
4622 {
4623 if (command == scsicmd)
4624 continue;
4625#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
4626 if ((command->state == SCSI_STATE_FINISHED)
4627 || (command->state == 0))
4628 continue;
4629#endif
4630#if (1 || (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)))
4631 if (command->timeout_per_command
4632 <= scsicmd->timeout_per_command)
4633 continue;
4634#else
4635 if (command->request->timeout
4636 <= scsicmd->request->timeout)
4637 continue;
4638#endif
4639#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
4640 spin_unlock_irqrestore(
4641 &scsicmd->device->list_lock,
4642 flags);
4643#endif
4644#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) || defined(DID_BUS_BUSY))
4645 scsicmd->result = DID_BUS_BUSY << 16 |
4646 COMMAND_COMPLETE << 8;
4647#else
4648 scsicmd->result = DID_OK << 16
4649 | COMMAND_COMPLETE << 8
4650 | SAM_STAT_CHECK_CONDITION;
4651 set_sense(
4652 &dev->fsa_dev[cid].sense_data,
4653 ABORTED_COMMAND, 0, 0, 0, 0);
4654 memcpy(scsicmd->sense_buffer,
4655 &dev->fsa_dev[cid].sense_data,
4656 min_t(size_t, sizeof(
4657 dev->fsa_dev[cid].sense_data),
4658 SCSI_SENSE_BUFFERSIZE));
4659#endif
4660 goto scsi_done_ret;
4661 }
4662#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
4663 spin_unlock_irqrestore(
4664 &scsicmd->device->list_lock,
4665 flags);
4666#endif
4667 }
4668#endif
4669 bus = aac_logical_to_phys(scmd_channel(scsicmd));
4670 if( bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
4671 ( dev->hba_map[bus][cid].expose == AAC_HIDE_DISK)){
4672 if( scsicmd->cmnd[0] == INQUIRY){
4673 scsicmd->result = DID_NO_CONNECT << 16;
4674 goto scsi_done_ret;
4675 }
4676 }
4677
4678 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
4679 dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
4680 if (dev->in_reset)
4681 return -1;
4682 return aac_send_hba_fib(scsicmd);
4683 } else if (dev->nondasd_support || expose_physicals ||
4684 dev->jbod) {
4685#if (!defined(CONFIG_COMMUNITY_KERNEL))
4686 /*
4687 * Read and Write protect the exposed
4688 * physical devices.
4689 */
4690 if (scsicmd->device->no_uld_attach)
4691 switch (scsicmd->cmnd[0]) {
4692 /* Filter Format? SMART Verify/Fix? */
4693 case MODE_SELECT:
4694 case MODE_SELECT_10:
4695 case LOG_SELECT:
4696 case WRITE_LONG:
4697 case WRITE_SAME:
4698 case WRITE_VERIFY:
4699 case WRITE_VERIFY_12:
4700 case WRITE_6:
4701 case READ_6:
4702 case WRITE_10:
4703 case READ_10:
4704 case WRITE_12:
4705 case READ_12:
4706#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
4707 case WRITE_16:
4708 case READ_16:
4709#endif
4710 scsicmd->result = DID_OK << 16
4711 | COMMAND_COMPLETE << 8
4712 | SAM_STAT_CHECK_CONDITION;
4713 set_sense(
4714 &dev->fsa_dev[cid].sense_data,
4715 DATA_PROTECT, SENCODE_DATA_PROTECT,
4716 ASENCODE_END_OF_DATA, 0, 0);
4717 memcpy(scsicmd->sense_buffer,
4718 &dev->fsa_dev[cid].sense_data,
4719 min_t(size_t, sizeof(
4720 dev->fsa_dev[cid].sense_data),
4721 SCSI_SENSE_BUFFERSIZE));
4722 goto scsi_done_ret;
4723 }
4724#endif
4725 if (dev->in_reset)
4726 return -1;
4727 return aac_send_srb_fib(scsicmd);
4728 } else {
4729 scsicmd->result = DID_NO_CONNECT << 16;
4730 goto scsi_done_ret;
4731 }
4732 }
4733 }
4734 /*
4735 * else Command for the controller itself
4736 */
4737 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */
4738 (scsicmd->cmnd[0] != TEST_UNIT_READY))
4739 {
4740 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
4741 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
4742 set_sense(&dev->fsa_dev[cid].sense_data,
4743 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
4744 ASENCODE_INVALID_COMMAND, 0, 0);
4745 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
4746 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
4747 SCSI_SENSE_BUFFERSIZE));
4748 goto scsi_done_ret;
4749 }
4750
4751 switch (scsicmd->cmnd[0]) {
4752 case READ_6:
4753 case READ_10:
4754 case READ_12:
4755#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(READ_16))
4756 case READ_16:
4757#endif
4758 if (dev->in_reset)
4759 return -1;
4760 /*
4761 * Hack to keep track of ordinal number of the device that
4762 * corresponds to a container. Needed to convert
4763 * containers to /dev/sd device names
4764 */
4765#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) && defined(AAC_EXTENDED_TIMEOUT))
4766 if ((scsicmd->eh_state != SCSI_STATE_QUEUED)
4767 && (extendedtimeout > 0)) {
4768 mod_timer(&scsicmd->eh_timeout, jiffies + (extendedtimeout * HZ));
4769 }
4770#endif
4771
4772#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
4773#if (defined(__VMKLNX30__) || defined(__VMKLNX__))
4774 /* This code is commented out because such a mapping
4775 * doesn't seem to exist in vmklinux.
4776 *
4777 * This information is only used when a QUERY_DISK IOCTL
4778 * comes down and as far as I can tell there is no
4779 * equivalent check in vmklinux.
4780 */
4781#endif
4782#if (!defined(__VMKLNX30__) && !defined(__VMKLNX__))
4783 if(fsa_dev_ptr[cid].devname[0]=='\0') {
4784 adbg_ioctl(dev,KERN_INFO,
4785 "rq_disk=%p disk_name=\"%s\"\n",
4786 scsicmd->request->rq_disk,
4787 scsicmd->request->rq_disk
4788 ? scsicmd->request->rq_disk->disk_name
4789 : "Aiiiii");
4790 }
4791 if (scsicmd->request->rq_disk)
4792 strlcpy(fsa_dev_ptr[cid].devname,
4793 scsicmd->request->rq_disk->disk_name,
4794 min(sizeof(fsa_dev_ptr[cid].devname),
4795 sizeof(scsicmd->request->rq_disk->disk_name) + 1));
4796#endif
4797#else
4798 get_sd_devname(DEVICE_NR(scsicmd->request.rq_dev), fsa_dev_ptr[cid].devname);
4799#endif
4800 return aac_read(scsicmd);
4801
4802 case WRITE_6:
4803 case WRITE_10:
4804 case WRITE_12:
4805#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
4806 case WRITE_16:
4807#endif
4808 if (dev->in_reset)
4809 return -1;
4810#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) && defined(AAC_EXTENDED_TIMEOUT))
4811 if ((scsicmd->eh_state != SCSI_STATE_QUEUED)
4812 && (extendedtimeout > 0)) {
4813 mod_timer(&scsicmd->eh_timeout, jiffies + (extendedtimeout * HZ));
4814 }
4815#endif
4816 return aac_write(scsicmd);
4817
4818 case SYNCHRONIZE_CACHE:
4819 if (((aac_cache & 6) == 6) && dev->cache_protected) {
4820 scsicmd->result = DID_OK << 16 |
4821 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4822 goto scsi_done_ret;
4823 }
4824 /* Issue FIB to tell Firmware to flush it's cache */
4825 if ((aac_cache & 6) != 2)
4826#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
4827 return aac_adapter_synchronize(dev, scsicmd);
4828#else
4829 return aac_synchronize(scsicmd);
4830#endif
4831
4832 case INQUIRY:
4833#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
4834 return aac_adapter_inquiry(dev, scsicmd);
4835#else
4836 {
4837 struct inquiry_data inq_data;
4838
4839 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
4840 memset(&inq_data, 0, sizeof (struct inquiry_data));
4841
4842 if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
4843 char *arr = (char *)&inq_data;
4844
4845 /* EVPD bit set */
4846 arr[0] = (scmd_id(scsicmd) == host->this_id) ?
4847 INQD_PDT_PROC : INQD_PDT_DA;
4848 if (scsicmd->cmnd[2] == 0) {
4849 /* supported vital product data pages */
4850/* Excluding SUSE as it has issues when inbox driver does not have this support but outbox has it.
4851 Because SUSE uses /dev/disk/by-id mapping entries in the OS grub config and VPD 0X83 creates conflicts */
4852#if (!defined(CONFIG_SUSE_KERNEL))
4853 arr[3] = 3;
4854 arr[6] = 0x83;
4855#else
4856 arr[3] = 2;
4857#endif
4858 arr[4] = 0x0;
4859 arr[5] = 0x80;
4860 arr[1] = scsicmd->cmnd[2];
4861#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
4862 aac_internal_transfer(scsicmd, &inq_data, 0,
4863 sizeof(inq_data));
4864#else
4865 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
4866 sizeof(inq_data));
4867#endif
4868 scsicmd->result = DID_OK << 16 |
4869 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4870 } else if (scsicmd->cmnd[2] == 0x80) {
4871 /* unit serial number page */
4872 arr[3] = setinqserial(dev, &arr[4],
4873 scmd_id(scsicmd));
4874 arr[1] = scsicmd->cmnd[2];
4875#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
4876 aac_internal_transfer(scsicmd, &inq_data, 0,
4877 sizeof(inq_data));
4878#else
4879 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
4880 sizeof(inq_data));
4881#endif
4882 if (aac_wwn != 2)
4883 return aac_get_container_serial(
4884 scsicmd);
4885 /* SLES 10 SP1 special */
4886 scsicmd->result = DID_OK << 16 |
4887 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4888/* Excluding SUSE as it has issues when inbox driver does not have this support but outbox has it.
4889 Because SUSE uses /dev/disk/by-id mapping entries in the OS grub config and VPD 0X83 creates conflicts */
4890#if (!defined(CONFIG_SUSE_KERNEL))
4891 } else if (scsicmd->cmnd[2] == 0x83) {
4892 /* vpd page 0x83 - Device Identification Page */
4893 char *sno = (char *)&inq_data;
4894
4895 sno[3] = setinqserial(dev, &sno[4],
4896 scmd_id(scsicmd));
4897
4898 if (aac_wwn != 2)
4899 return aac_get_container_serial(
4900 scsicmd);
4901
4902 scsicmd->result = DID_OK << 16 |
4903 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
4904#endif
4905 } else {
4906 /* vpd page not implemented */
4907 scsicmd->result = DID_OK << 16 |
4908 COMMAND_COMPLETE << 8 |
4909 SAM_STAT_CHECK_CONDITION;
4910 set_sense(&dev->fsa_dev[cid].sense_data,
4911 ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
4912 ASENCODE_NO_SENSE, 7, 2);
4913 memcpy(scsicmd->sense_buffer,
4914 &dev->fsa_dev[cid].sense_data,
4915 min_t(size_t,
4916 sizeof(dev->fsa_dev[cid].sense_data),
4917 SCSI_SENSE_BUFFERSIZE));
4918 }
4919 scsicmd->scsi_done(scsicmd);
4920 return 0;
4921 }
4922 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */
4923 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
4924 inq_data.inqd_len = 31;
4925 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
4926 inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */
4927 /*
4928 * Set the Vendor, Product, and Revision Level
4929 * see: <vendor>.c i.e. aac.c
4930 */
4931 if (cid == host->this_id) {
4932 setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
4933 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
4934#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
4935 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
4936#else
4937 scsi_sg_copy_from_buffer(scsicmd, &inq_data,
4938 sizeof(inq_data));
4939#endif
4940 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
4941 goto scsi_done_ret;
4942 }
4943 if (dev->in_reset)
4944 return -1;
4945 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
4946 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */
4947#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
4948 aac_internal_transfer(scsicmd, &inq_data, 0, sizeof(inq_data));
4949#else
4950 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
4951#endif
4952 return aac_get_container_name(scsicmd);
4953 }
4954#endif
4955#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(SERVICE_ACTION_IN))
4956 case SERVICE_ACTION_IN:
4957#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
4958 printk(KERN_NOTICE
4959 "SERVICE_ACTION_IN, raw_io_interface=%d raw_io_64=%d\n",
4960 dev->raw_io_interface, dev->raw_io_64);
4961#endif
4962#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
4963 if ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)
4964 break;
4965 return aac_adapter_read_capacity_16(dev, scsicmd);
4966#else
4967 if (!(dev->raw_io_interface) ||
4968 !(dev->raw_io_64) ||
4969 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
4970 break;
4971 {
4972 u64 capacity;
4973 char cp[13];
4974 unsigned int alloc_len;
4975
4976 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
4977 capacity = fsa_dev_ptr[cid].size - 1;
4978 cp[0] = (capacity >> 56) & 0xff;
4979 cp[1] = (capacity >> 48) & 0xff;
4980 cp[2] = (capacity >> 40) & 0xff;
4981 cp[3] = (capacity >> 32) & 0xff;
4982 cp[4] = (capacity >> 24) & 0xff;
4983 cp[5] = (capacity >> 16) & 0xff;
4984 cp[6] = (capacity >> 8) & 0xff;
4985 cp[7] = (capacity >> 0) & 0xff;
4986 cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
4987 cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
4988 cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
4989 cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
4990 cp[12] = 0;
4991#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
4992 printk(KERN_INFO "SAI_READ_CAPACITY_16(%d): "
4993 "%02x %02x %02x %02x %02x %02x %02x %02x "
4994 "%02x %02x %02x %02x %02x\n",
4995 scsicmd->cmnd[13],
4996 cp[0] & 0xff, cp[1] & 0xff, cp[2] & 0xff, cp[3] & 0xff,
4997 cp[4] & 0xff, cp[5] & 0xff, cp[6] & 0xff, cp[7] & 0xff,
4998 cp[8] & 0xff, cp[9] & 0xff, cp[10] & 0xff, cp[11] & 0xff,
4999 cp[12] & 0xff);
5000#endif
5001
5002 alloc_len = ((scsicmd->cmnd[10] << 24)
5003 + (scsicmd->cmnd[11] << 16)
5004 + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
5005#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
5006 aac_internal_transfer(scsicmd, cp, 0,
5007 min_t(size_t, alloc_len, sizeof(cp)));
5008 if (sizeof(cp) < alloc_len) {
5009 unsigned int len, offset = sizeof(cp);
5010
5011 memset(cp, 0, offset);
5012 do {
5013 len = min_t(size_t, alloc_len - offset,
5014 sizeof(cp));
5015 aac_internal_transfer(scsicmd, cp, offset, len);
5016 } while ((offset += len) < alloc_len);
5017 }
5018#else
5019
5020 alloc_len = min_t(size_t, alloc_len, sizeof(cp));
5021 scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
5022 if (alloc_len < scsi_bufflen(scsicmd))
5023 scsi_set_resid(scsicmd,
5024 scsi_bufflen(scsicmd) - alloc_len);
5025#endif
5026
5027 /* Do not cache partition table for arrays */
5028 scsicmd->device->removable = 1;
5029
5030 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5031 goto scsi_done_ret;
5032 }
5033#endif
5034#endif
5035
5036 case READ_CAPACITY:
5037#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
5038 return aac_adapter_read_capacity(dev, scsicmd);
5039#else
5040 {
5041 u32 capacity;
5042 char cp[8];
5043
5044 dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
5045 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
5046 capacity = fsa_dev_ptr[cid].size - 1;
5047 else
5048 capacity = (u32)-1;
5049
5050 cp[0] = (capacity >> 24) & 0xff;
5051 cp[1] = (capacity >> 16) & 0xff;
5052 cp[2] = (capacity >> 8) & 0xff;
5053 cp[3] = (capacity >> 0) & 0xff;
5054 cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
5055 cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
5056 cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
5057 cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
5058#if (defined(AAC_DEBUG_INSTRUMENT_2TB))
5059 printk(KERN_INFO "READ_CAPACITY: "
5060 "%02x %02x %02x %02x %02x %02x %02x %02x\n",
5061 cp[0] & 0xff, cp[1] & 0xff, cp[2] & 0xff, cp[3] & 0xff,
5062 cp[4] & 0xff, cp[5] & 0xff, cp[6] & 0xff, cp[7] & 0xff);
5063#endif
5064#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
5065 aac_internal_transfer(scsicmd, cp, 0, sizeof(cp));
5066#else
5067 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
5068#endif
5069 /* Do not cache partition table for arrays */
5070 scsicmd->device->removable = 1;
5071 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
5072 SAM_STAT_GOOD;
5073 goto scsi_done_ret;
5074 }
5075#endif
5076
5077 case MODE_SENSE:
5078 {
5079 int mode_buf_length = 4;
5080 u32 capacity;
5081 aac_modep_data mpd;
5082
5083 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
5084 capacity = fsa_dev_ptr[cid].size - 1;
5085 else
5086 capacity = (u32)-1;
5087
5088 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
5089 memset((char*)&mpd,0,sizeof(aac_modep_data));
5090 mpd.hd.data_length = sizeof(mpd.hd) - 1; /* Mode data length */
5091 mpd.hd.med_type = 0; /* Medium type - default */
5092 mpd.hd.dev_par = 0; /* Device-specific param,
5093 bit 8: 0/1 = write enabled/protected
5094 bit 4: 0/1 = FUA enabled */
5095
5096#if (defined(RIO_SUREWRITE))
5097 if (dev->raw_io_interface && ((aac_cache & 5) != 1))
5098 mpd.hd.dev_par = 0x10;
5099#endif
5100 if (scsicmd->cmnd[1] & 0x8) {
5101 mpd.hd.bd_length = 0; /* Block descriptor length */
5102 } else {
5103 mpd.hd.bd_length = sizeof(mpd.bd);
5104 mpd.hd.data_length += mpd.hd.bd_length;
5105 mpd.bd.block_length[0] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
5106 mpd.bd.block_length[1] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
5107 mpd.bd.block_length[2] = fsa_dev_ptr[cid].block_size & 0xff;
5108
5109 mpd.mpc_buf[0] = scsicmd->cmnd[2];
5110 if(scsicmd->cmnd[2] == 0x1C)
5111 {
5112 mpd.mpc_buf[1] = 0xa; //page length
5113 mpd.hd.data_length = 23; /* Mode data length */
5114 }
5115 else
5116 mpd.hd.data_length = 15; /* Mode data length */
5117
5118 if (capacity > 0xffffff) {
5119 mpd.bd.block_count[0] = 0xff;
5120 mpd.bd.block_count[1] = 0xff;
5121 mpd.bd.block_count[2] = 0xff;
5122 } else {
5123 mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
5124 mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
5125 mpd.bd.block_count[2] = capacity & 0xff;
5126 }
5127 }
5128
5129 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
5130 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
5131 mpd.hd.data_length += 3;
5132 mpd.mpc_buf[0] = 8;
5133 mpd.mpc_buf[1] = 1;
5134 mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
5135 ? 0 : 0x04; /* WCE */
5136 mode_buf_length = sizeof(mpd);
5137
5138 }
5139
5140 if (mode_buf_length > scsicmd->cmnd[4])
5141 mode_buf_length = scsicmd->cmnd[4];
5142 else
5143 mode_buf_length = sizeof(mpd);
5144
5145
5146#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
5147 aac_internal_transfer(scsicmd, (char*)&mpd, 0, mode_buf_length);
5148#else
5149 scsi_sg_copy_from_buffer(scsicmd, (char*)&mpd, mode_buf_length);
5150#endif
5151 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5152 goto scsi_done_ret;
5153 }
5154
5155 case MODE_SENSE_10:
5156 {
5157 u32 capacity;
5158 int mode_buf_length = 8;
5159 aac_modep10_data mpd10;
5160
5161 if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
5162 capacity = fsa_dev_ptr[cid].size - 1;
5163 else
5164 capacity = (u32)-1;
5165
5166 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
5167 memset((char*)&mpd10,0,sizeof(aac_modep10_data));
5168 mpd10.hd.data_length[0] = 0; /* Mode data length (MSB) */
5169 mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; /* Mode data length (LSB) */
5170 mpd10.hd.med_type = 0; /* Medium type - default */
5171 mpd10.hd.dev_par = 0; /* Device-specific param,
5172 bit 8: 0/1 = write enabled/protected
5173 bit 4: 0/1 = FUA enabled */
5174#if (defined(RIO_SUREWRITE))
5175 if (dev->raw_io_interface && ((aac_cache & 5) != 1))
5176 mpd10.hd.dev_par = 0x10;
5177#endif
5178 mpd10.hd.rsrvd[0] = 0; /* reserved */
5179 mpd10.hd.rsrvd[1] = 0; /* reserved */
5180
5181 if (scsicmd->cmnd[1] & 0x8) {
5182 mpd10.hd.bd_length[0] = 0; /* Block descriptor length (MSB) */
5183 mpd10.hd.bd_length[1] = 0; /* Block descriptor length (LSB) */
5184 } else {
5185 mpd10.hd.bd_length[0] = 0;
5186 mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
5187
5188 mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
5189
5190 mpd10.bd.block_length[0] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
5191 mpd10.bd.block_length[1] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
5192 mpd10.bd.block_length[2] = fsa_dev_ptr[cid].block_size & 0xff;
5193
5194 if (capacity > 0xffffff) {
5195 mpd10.bd.block_count[0] = 0xff;
5196 mpd10.bd.block_count[1] = 0xff;
5197 mpd10.bd.block_count[2] = 0xff;
5198 } else {
5199 mpd10.bd.block_count[0] = (capacity >> 16) & 0xff;
5200 mpd10.bd.block_count[1] = (capacity >> 8) & 0xff;
5201 mpd10.bd.block_count[2] = capacity & 0xff;
5202 }
5203 }
5204 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
5205 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
5206
5207 mpd10.hd.data_length[1] += 3;
5208 mpd10.mpc_buf[0] = 8;
5209 mpd10.mpc_buf[1] = 1;
5210 mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
5211 ? 0 : 0x04; /* WCE */
5212
5213 mode_buf_length = sizeof(mpd10);
5214
5215 if (mode_buf_length > scsicmd->cmnd[8])
5216 mode_buf_length = scsicmd->cmnd[8];
5217 }
5218
5219#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
5220 aac_internal_transfer(scsicmd, (char*)&mpd10, 0, mode_buf_length);
5221#else
5222 scsi_sg_copy_from_buffer(scsicmd, (char*)&mpd10, mode_buf_length);
5223#endif
5224
5225 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5226 goto scsi_done_ret;
5227 }
5228
5229 case REQUEST_SENSE:
5230 dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
5231 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, sizeof (struct sense_data));
5232 memset(&dev->fsa_dev[cid].sense_data, 0, sizeof (struct sense_data));
5233 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5234 goto scsi_done_ret;
5235
5236 case ALLOW_MEDIUM_REMOVAL:
5237 dprintk((KERN_DEBUG "LOCK command.\n"));
5238 if (scsicmd->cmnd[4])
5239 fsa_dev_ptr[cid].locked = 1;
5240 else
5241 fsa_dev_ptr[cid].locked = 0;
5242
5243 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5244 goto scsi_done_ret;
5245
5246 /*
5247 * These commands are all No-Ops
5248 */
5249 case TEST_UNIT_READY:
5250 if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
5251 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
5252 SAM_STAT_CHECK_CONDITION;
5253 set_sense(&dev->fsa_dev[cid].sense_data,
5254 NOT_READY, SENCODE_BECOMING_READY,
5255 ASENCODE_BECOMING_READY, 0, 0);
5256 memcpy(scsicmd->sense_buffer,
5257 &dev->fsa_dev[cid].sense_data,
5258 min_t(size_t,
5259 sizeof(dev->fsa_dev[cid].sense_data),
5260 SCSI_SENSE_BUFFERSIZE));
5261 goto scsi_done_ret;
5262 }
5263
5264 /* FALLTHRU */
5265 case RESERVE:
5266 case RELEASE:
5267 case REZERO_UNIT:
5268 case REASSIGN_BLOCKS:
5269 case SEEK_10:
5270 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
5271 goto scsi_done_ret;
5272
5273 case START_STOP:
5274 return aac_start_stop(scsicmd);
5275
5276 /* FALLTHRU */
5277 default:
5278 /*
5279 * Unhandled commands
5280 */
5281 goto scsi_default;
5282 }
5283
5284 scsi_default:
5285 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", scsicmd->cmnd[0]));
5286 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
5287 set_sense(&dev->fsa_dev[cid].sense_data,
5288 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
5289 ASENCODE_INVALID_COMMAND, 0, 0);
5290 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
5291 min_t(size_t,
5292 sizeof(dev->fsa_dev[cid].sense_data),
5293 SCSI_SENSE_BUFFERSIZE));
5294
5295 scsi_done_ret:
5296#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
5297 __aac_io_done(scsicmd);
5298#else
5299 scsicmd->scsi_done(scsicmd);
5300#endif
5301 return 0;
5302}
5303#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
5304
5305static int aac_scsi_cmd_apre(struct scsi_cmnd * cmd)
5306{
5307 struct aac_apre_srb * hw_apre = (struct aac_apre_srb *)&fib->hw_apre_va;
5308 struct aac_dev * dev = fib->dev;
5309 struct fsa_dev_info * fsa_dev_ptr = &dev->fsa_dev[scmd_id(cmd)];
5310 int status;
5311 struct fib * fib;
5312
5313 /*
5314 * Allocate and initialize a Fib then setup a BlockWrite command
5315 */
5316#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
5317 if (!(fib = aac_fib_alloc_tag((struct aac_dev *)cmd->device->host->hostdata, cmd))) {
5318#else
5319 if (!(fib = aac_fib_alloc((struct aac_dev *)cmd->device->host->hostdata))) {
5320#endif
5321
5322 cmd->result = DID_ERROR << 16;
5323#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
5324 aac_io_done(cmd);
5325#else
5326 cmd->scsi_done(cmd);
5327#endif
5328 return 0;
5329 }
5330
5331 fib->Credits = fsa_dev_ptr->Credits;
5332 hw_apre->DataDir = 0;
5333 if (cmd->request_bufflen) switch (cmd->sc_data_direction) {
5334 case DMA_TO_DEVICE:
5335 hw_apre->DataDir = AAC_DATA_OUT_IOP_PERSPECTIVE;
5336 break;
5337 case DMA_BIDIRECTIONAL:
5338 hw_apre->DataDir = AAC_DATA_OUT_IOP_PERSPECTIVE | AAC_DATA_IN_IOP_PERSPECTIVE;
5339 break;
5340 case DMA_FROM_DEVICE:
5341 hw_apre->DataDir = AAC_DATA_IN_IOP_PERSPECTIVE;
5342 break;
5343 }
5344 hw_apre->header.Operation = AS_REQ_LKP_CODE_EXEC_SCSI_TASK;
5345 hw_apre->header.DestObjHandle = fsa_dev_ptr->DestObjHandle;
5346 hw_apre->CDBLength = (cmd->cmd_len - 1);
5347 memcpy(hw_apre->Cdb, cmd->cmnd, cmd->cmd_len);
5348 hw_apre->TransferSizeLow = cpu_to_le32(cmd->request_bufflen);
5349 hw_apre->TransferSizeHigh = cpu_to_le32((u32)((u64)cmd->request_bufflen>>32));
5350 status = aac_adapter_build_sg(fib, cmd, &hw_apre->Sgl);
5351 hw_apre->NumEsge = (status >
5352 (sizeof(hw_apre->Sgl.nark)/sizeof(hw_apre->Sgl.nark.FirstElement)))
5353 ? 1 : status;
5354 status = aac_adapter_deliver(fib);
5355
5356 /*
5357 * Check that the command queued to the controller
5358 */
5359 if (status == -EINPROGRESS) {
5360 cmd->SCp.phase = AAC_OWNER_FIRMWARE;
5361 return 0;
5362 }
5363
5364 printk(KERN_WARNING "aac_scsi_cmd_apre: failed with status: %d\n", status);
5365 /*
5366 * For some reason, the Fib didn't queue, return QUEUE_FULL
5367 */
5368 cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
5369#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
5370 aac_io_done(cmd);
5371#else
5372 cmd->scsi_done(cmd);
5373#endif
5374
5375 aac_fib_complete(fib);
5376 aac_fib_free(fib);
5377 return 0;
5378}
5379#endif
5380#if (!defined(CONFIG_COMMUNITY_KERNEL))
5381
/*
 * busy_disk - report whether the scsi_device backing container 'cid' is in use
 *
 * Walks the host's device list looking for the scsi_device whose
 * channel/id/lun match the container, and returns 1 when that device
 * currently has commands outstanding (or the host is in error recovery,
 * depending on kernel version), 0 otherwise.  Used to refuse deletion of
 * (and to report as "locked") containers that are still busy.
 *
 * The #if ladders select, per kernel version, (a) how to iterate the
 * host's devices and (b) which field indicates "busy"/"in recovery".
 */
static int busy_disk(struct aac_dev * dev, int cid)
{
	/* Guard against a torn-down adapter or host. */
	if ((dev != (struct aac_dev *)NULL)
	 && (dev->scsi_host_ptr != (struct Scsi_Host *)NULL)) {
		struct scsi_device *device;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
		shost_for_each_device(device, dev->scsi_host_ptr)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
		list_for_each_entry(device, &dev->scsi_host_ptr->my_devices, siblings)
#else
		for (device = dev->scsi_host_ptr->host_queue;
		  device != (struct scsi_device *)NULL;
		  device = device->next)
#endif
		{
			/* Match this device to the container's address ... */
			if ((device->channel == CONTAINER_TO_CHANNEL(cid))
			 && (device->id == CONTAINER_TO_ID(cid))
			 && (device->lun == CONTAINER_TO_LUN(cid))
			/* ... then test busy/recovery per kernel version. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
			 && (atomic_read(&device->access_count)
			  || test_bit(SHOST_RECOVERY, &dev->scsi_host_ptr->shost_state)
			  || dev->scsi_host_ptr->eh_active)) {
#elif (defined(RHEL_MAJOR) && RHEL_MAJOR == 7 && RHEL_MINOR >= 2)
			 && (atomic_read(&device->device_busy)
			  || (SHOST_RECOVERY == dev->scsi_host_ptr->shost_state))) {
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14) && (LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0))))
			 && (device->device_busy
			  || (SHOST_RECOVERY == dev->scsi_host_ptr->shost_state))) {
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,17,0)) || defined(SCSI_HAS_SHOST_STATE_ENUM))
			 && (atomic_read(&device->device_busy)
			  || (SHOST_RECOVERY == dev->scsi_host_ptr->shost_state))) {
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
			 && (device->device_busy
			  || test_bit(SHOST_RECOVERY, &dev->scsi_host_ptr->shost_state))) {
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
			 && (device->device_busy
			  || test_bit(SHOST_RECOVERY, &dev->scsi_host_ptr->shost_state)
			  || dev->scsi_host_ptr->eh_active)) {
#else
			 && (device->access_count
			  || dev->scsi_host_ptr->in_recovery
			  || dev->scsi_host_ptr->eh_active)) {
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
				/* shost_for_each_device() took a reference;
				 * drop it before the early return. */
				scsi_device_put(device);
#endif
				return 1;
			}
		}
	}
	return 0;
}
5434#endif
5435
5436static int query_disk(struct aac_dev *dev, void __user *arg)
5437{
5438 struct aac_query_disk qd;
5439 struct fsa_dev_info *fsa_dev_ptr;
5440
5441 fsa_dev_ptr = dev->fsa_dev;
5442 if (!fsa_dev_ptr)
5443 return -EBUSY;
5444 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
5445 return -EFAULT;
5446 if (qd.cnum == -1)
5447 qd.cnum = qd.id;
5448 else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
5449 {
5450 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
5451 return -EINVAL;
5452 qd.instance = dev->scsi_host_ptr->host_no;
5453 qd.bus = 0;
5454 qd.id = CONTAINER_TO_ID(qd.cnum);
5455 qd.lun = CONTAINER_TO_LUN(qd.cnum);
5456 }
5457 else return -EINVAL;
5458
5459 qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
5460#if (defined(CONFIG_COMMUNITY_KERNEL))
5461 qd.locked = fsa_dev_ptr[qd.cnum].locked;
5462#else
5463 qd.locked = fsa_dev_ptr[qd.cnum].locked || busy_disk(dev, qd.cnum);
5464#endif
5465 qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
5466
5467#if (!defined(__VMKLNX30__) && !defined(__VMKLNX__))
5468 if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
5469 qd.unmapped = 1;
5470 else
5471 qd.unmapped = 0;
5472#else
5473 qd.unmapped = 0;
5474#endif
5475
5476 strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
5477 min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
5478
5479 if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
5480 return -EFAULT;
5481 return 0;
5482}
5483
5484static int force_delete_disk(struct aac_dev *dev, void __user *arg)
5485{
5486 struct aac_delete_disk dd;
5487 struct fsa_dev_info *fsa_dev_ptr;
5488
5489 fsa_dev_ptr = dev->fsa_dev;
5490 if (!fsa_dev_ptr)
5491 return -EBUSY;
5492
5493 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
5494 return -EFAULT;
5495
5496 if (dd.cnum >= dev->maximum_num_containers)
5497 return -EINVAL;
5498 /*
5499 * Mark this container as being deleted.
5500 */
5501 fsa_dev_ptr[dd.cnum].deleted = 1;
5502 /*
5503 * Mark the container as no longer valid
5504 */
5505 fsa_dev_ptr[dd.cnum].valid = 0;
5506 return 0;
5507}
5508
5509static int delete_disk(struct aac_dev *dev, void __user *arg)
5510{
5511 struct aac_delete_disk dd;
5512 struct fsa_dev_info *fsa_dev_ptr;
5513
5514 fsa_dev_ptr = dev->fsa_dev;
5515 if (!fsa_dev_ptr)
5516 return -EBUSY;
5517
5518 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
5519 return -EFAULT;
5520
5521 if (dd.cnum >= dev->maximum_num_containers)
5522 return -EINVAL;
5523 /*
5524 * If the container is locked, it can not be deleted by the API.
5525 */
5526#if (defined(CONFIG_COMMUNITY_KERNEL))
5527 if (fsa_dev_ptr[dd.cnum].locked)
5528#else
5529 if (fsa_dev_ptr[dd.cnum].locked || busy_disk(dev, dd.cnum))
5530#endif
5531 return -EBUSY;
5532 else {
5533 /*
5534 * Mark the container as no longer being valid.
5535 */
5536 fsa_dev_ptr[dd.cnum].valid = 0;
5537 fsa_dev_ptr[dd.cnum].devname[0] = '\0';
5538 return 0;
5539 }
5540}
5541
5542#if (defined(FSACTL_REGISTER_FIB_SEND) && !defined(CONFIG_COMMUNITY_KERNEL))
/*
 * aac_register_fib_send - FSACTL_REGISTER_FIB_SEND ioctl handler
 *
 * Exchanges fib-send function pointers with the caller: reads the
 * caller-supplied callback from *arg, writes the driver's own
 * aac_fib_send back into *arg, and installs the callback as the
 * global fib-send switch.  Passing a NULL callback restores the
 * default aac_fib_send.  Only one non-default callback may be
 * registered at a time (-EBUSY otherwise).
 *
 * NOTE(review): arg is annotated __user yet is dereferenced directly
 * instead of via get_user()/put_user(); that is only safe if the real
 * caller passes a kernel pointer despite the annotation — confirm the
 * caller before changing this.
 */
static int aac_register_fib_send(struct aac_dev *dev, void __user *arg)
{
	fib_send_t __user callback;

	if (arg == NULL) {
		return -EINVAL;
	}
	/* Read the caller's callback and hand back our own entry point. */
	callback = *((fib_send_t __user *)arg);
	*((fib_send_t __user *)arg) = (fib_send_t __user)aac_fib_send;
	if (callback == (fib_send_t __user)NULL) {
		/* NULL callback: restore the default send routine. */
		aac_fib_send_switch = aac_fib_send;
		return 0;
	}
	/* Refuse to stack registrations: one override at a time. */
	if (aac_fib_send_switch != aac_fib_send) {
		return -EBUSY;
	}
	aac_fib_send_switch = (fib_send_t)callback;
	return 0;
}
5562
5563#endif
5564int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
5565{
5566
5567 int retval;
5568 if (cmd != FSACTL_GET_NEXT_ADAPTER_FIB){
5569 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl(%p,%x,%p)\n", dev, cmd, arg);
5570 }
5571
5572 switch (cmd) {
5573 case FSACTL_QUERY_DISK:
5574 retval = query_disk(dev, arg);
5575 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl returns %d\n", retval);
5576 return retval;
5577 case FSACTL_DELETE_DISK:
5578 retval = delete_disk(dev, arg);
5579 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl returns %d\n", retval);
5580 return retval;
5581 case FSACTL_FORCE_DELETE_DISK:
5582 retval = force_delete_disk(dev, arg);
5583 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl returns %d\n", retval);
5584 return retval;
5585 case FSACTL_GET_CONTAINERS:
5586 retval = aac_get_containers(dev);
5587 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl returns %d\n", retval);
5588 return retval;
5589#if (defined(FSACTL_REGISTER_FIB_SEND) && !defined(CONFIG_COMMUNITY_KERNEL))
5590 case FSACTL_REGISTER_FIB_SEND:
5591 retval = aac_register_fib_send(dev, arg);
5592 adbg_ioctl(dev, KERN_DEBUG, "aac_dev_ioctl returns %d\n", retval);
5593 return retval;
5594#endif
5595 default:
5596 adbg_ioctl(dev, KERN_DEBUG,"aac_dev_ioctl returns -ENOTTY\n");
5597 return -ENOTTY;
5598 }
5599}
5600
5601/**
5602 *
5603 * aac_srb_callback
5604 * @context: the context set in the fib - here it is scsi cmd
5605 * @fibptr: pointer to the fib
5606 *
5607 * Handles the completion of a scsi command to a non dasd device
5608 *
5609 */
5610
static void aac_srb_callback(void *context, struct fib * fibptr)
{
	struct aac_dev *dev;
	struct aac_srb_reply *srbreply;
	struct scsi_cmnd *scsicmd;

	scsicmd = (struct scsi_cmnd *) context;

	/* The command may have been completed elsewhere (abort/reset); a
	 * stale context means there is nothing left to finish here. */
	if (!aac_valid_context(scsicmd, fibptr))
		return;

	BUG_ON(fibptr == NULL);

	dev = fibptr->dev;

	srbreply = (struct aac_srb_reply *) fib_data(fibptr);

	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */

	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
		/* fast response: no reply payload was written by the
		 * firmware, so synthesize a fully-successful one */
		srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
		srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
	} else {
		/*
		 * Calculate resid for sg
		 */
		scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
				   - le32_to_cpu(srbreply->data_xfer_length));
	}

	/* Undo the DMA mapping that was set up at submit time (legacy
	 * kernels may have used a single pci_map_single() mapping). */
#if (!defined(__VMKLNX30__) || defined(__x86_64__))
	scsi_dma_unmap(scsicmd);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	if(!scsi_sg_count(scsicmd) && scsi_bufflen(scsicmd))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		pci_unmap_single(dev->pdev, (dma_addr_t)scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
#else
		pci_unmap_single(dev->pdev, scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
#endif
			scsicmd->sc_data_direction);
#endif
#endif
	/* expose physical device if expose_physicald flag is on */
	if(scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) && expose_physicals > 0)
		aac_expose_phy_device(scsicmd);

#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
#if defined(__ESX5__)

#define SCSI_VENDOR_ID_OFFSET 8
#define SCSI_VENDOR_ID_LEN 8
#define SCSI_PRODUCT_ID_OFFSET 16
#define SCSI_PRODUCT_ID_LEN 16

	/* ESX5: for Adaptec SGPIO enclosure devices on channel 3, set bit
	 * 0x20 in the first INQUIRY byte before handing data up
	 * (NOTE(review): presumably so the VMkernel treats the enclosure
	 * specially -- confirm against the ESX storage stack). */
	if (scmd_channel(scsicmd) == 3) {
		if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)) {
			char inq_data;
			char vi[SCSI_VENDOR_ID_LEN + 1];
			char pi[SCSI_PRODUCT_ID_LEN + 1];
			char resp[32];

			scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
			scsi_sg_copy_to_buffer(scsicmd, resp, sizeof(resp));

			memcpy(vi,resp + SCSI_VENDOR_ID_OFFSET, SCSI_VENDOR_ID_LEN);
			vi[SCSI_VENDOR_ID_LEN]='\0';
			memcpy(pi,resp + SCSI_PRODUCT_ID_OFFSET, SCSI_PRODUCT_ID_LEN);
			pi[SCSI_PRODUCT_ID_LEN]='\0';

			if ((inq_data & 0x1F) == TYPE_ENCLOSURE &&
				(strstr(vi,"ADAPTEC") != NULL) &&
				(strstr(pi,"SGPIO") != NULL) )
				inq_data |= 0x20;

			scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
		}
	}
#endif
#endif
	/*
	 * First check the fib status
	 */

	if (le32_to_cpu(srbreply->status) != ST_OK){
		int len;
		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
			    SCSI_SENSE_BUFFERSIZE);
		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
	}

	/*
	 * Next check the srb status
	 */
	switch( (le32_to_cpu(srbreply->srb_status))&0x3f){
	case SRB_STATUS_ERROR_RECOVERY:
	case SRB_STATUS_PENDING:
	case SRB_STATUS_SUCCESS:
//#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) || defined(BLIST_NO_ULD_ATTACH))
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
#else
		if ((scsicmd->cmnd[0] == INQUIRY) && (expose_physicals <= 0)) {
			u8 b;
			/* We can't expose disk devices because we can't tell
			 * whether they are the raw container drives or stand
			 * alone drives.  If they have the removable bit set
			 * then we should expose them though.
			 */
			b = *((u8*)scsicmd->request_buffer);
			if (((b & 0x1F) != TYPE_DISK) ||
			  (((u8*)scsicmd->request_buffer)[1] & 0x80) ||
			  /*
			   * We will allow disk devices if in RAID/SCSI mode and
			   * the channel is 2
			   */
			  ((dev->raid_scsi_mode) &&
					(scmd_channel(scsicmd) == 2)) ||
			  (dev->jbod && !(b >> 5))) {
				if (dev->jbod && ((b & 0x1F) == TYPE_DISK))
					((u8*)scsicmd->request_buffer)[1] |=
						1 << 7;
				scsicmd->result = DID_OK << 16 |
						COMMAND_COMPLETE << 8;
			} else if (expose_physicals) {
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)) && !defined(BLIST_NO_ULD_ATTACH)))
				scsicmd->device->no_uld_attach = (void *)1;
#else
				scsicmd->device->no_uld_attach = 1;
#endif
//#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)))
				/* Insurance */
				(*(u8*)scsicmd->request_buffer) |= 1 << 5;
//#endif
				scsicmd->result = DID_OK << 16 |
						COMMAND_COMPLETE << 8;
			} else
				scsicmd->result = DID_NO_CONNECT << 16 |
						COMMAND_COMPLETE << 8;
		} else
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
#endif
		break;
	case SRB_STATUS_DATA_OVERRUN:
		/* For data-transfer commands, distinguish a true overrun
		 * from an underflow relative to the midlayer's expectation. */
		switch(scsicmd->cmnd[0]){
		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(READ_16))
		case READ_16:
#endif
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) || defined(WRITE_16))
		case WRITE_16:
#endif
			if (le32_to_cpu(srbreply->data_xfer_length) < scsicmd->underflow) {
				printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
			} else {
				printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
			}
			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		case INQUIRY: {
//#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)) || defined(BLIST_NO_ULD_ATTACH))
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
#else
			if (expose_physicals <= 0) {
				/*
				 * We can't expose disk devices because we
				 * can't tell whether they are the raw
				 * container drives or stand alone drives
				 */
				u8 b = *((u8*)scsicmd->request_buffer);
				if ((((b & 0x1f) != TYPE_DISK) ||
				  (((u8*)scsicmd->request_buffer)[1] & 0x80)) ||
				  /*
				   * We will allow disk devices if in RAID/SCSI
				   * mode and the channel is 2
				   */
				  ((dev->raid_scsi_mode) &&
						(scmd_channel(scsicmd) == 2)) ||
				  (dev->jbod && !(b >> 5))) {
					if (dev->jbod && ((b & 0x1F) == TYPE_DISK))
						((u8*)scsicmd->request_buffer)[1] |=
							1 << 7;
					scsicmd->result = DID_OK << 16 |
							COMMAND_COMPLETE << 8;
				} else if (expose_physicals) {
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)) && !defined(BLIST_NO_ULD_ATTACH)))
					scsicmd->device->no_uld_attach = (void *)1;
#else
					scsicmd->device->no_uld_attach = 1;
#endif
//#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)))
					/* Insurance */
					(*(u8*)scsicmd->request_buffer) |= 1 << 5;
//#endif
					scsicmd->result = DID_OK << 16 |
							COMMAND_COMPLETE << 8;
				} else
					scsicmd->result = DID_NO_CONNECT << 16 |
							COMMAND_COMPLETE << 8;
			} else
				scsicmd->result = DID_OK << 16 |
						COMMAND_COMPLETE << 8;
#endif
			break;
		}
		default:
			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		}
		break;
	case SRB_STATUS_ABORTED:
		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
		break;
	case SRB_STATUS_ABORT_FAILED:
		// Not sure about this one - but assuming the hba was trying to abort for some reason
		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
		break;
	case SRB_STATUS_PARITY_ERROR:
		scsicmd->result = DID_PARITY << 16 | MSG_PARITY_ERROR << 8;
		break;
	case SRB_STATUS_NO_DEVICE:
	case SRB_STATUS_INVALID_PATH_ID:
	case SRB_STATUS_INVALID_TARGET_ID:
	case SRB_STATUS_INVALID_LUN:
	case SRB_STATUS_SELECTION_TIMEOUT:
		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_COMMAND_TIMEOUT:
	case SRB_STATUS_TIMEOUT:
		scsicmd->result = DID_TIME_OUT << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_BUSY:
#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) || defined(DID_BUS_BUSY))
		scsicmd->result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
#else
		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
#endif
		break;

	case SRB_STATUS_BUS_RESET:
		scsicmd->result = DID_RESET << 16 | COMMAND_COMPLETE << 8;
		break;

	case SRB_STATUS_MESSAGE_REJECTED:
		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
		break;
	case SRB_STATUS_REQUEST_FLUSHED:
	case SRB_STATUS_ERROR:
	case SRB_STATUS_INVALID_REQUEST:
	case SRB_STATUS_REQUEST_SENSE_FAILED:
	case SRB_STATUS_NO_HBA:
	case SRB_STATUS_UNEXPECTED_BUS_FREE:
	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
	case SRB_STATUS_DELAYED_RETRY:
	case SRB_STATUS_BAD_FUNCTION:
	case SRB_STATUS_NOT_STARTED:
	case SRB_STATUS_NOT_IN_USE:
	case SRB_STATUS_FORCE_ABORT:
	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
	default:
#ifdef AAC_DETAILED_STATUS_INFO
		printk("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
			le32_to_cpu(srbreply->srb_status) & 0x3F,
			aac_get_status_string(
				le32_to_cpu(srbreply->srb_status) & 0x3F),
			scsicmd->cmnd[0],
			le32_to_cpu(srbreply->scsi_status));
#endif
		if((scsicmd->cmnd[0] == ATA_12) || (scsicmd->cmnd[0] == ATA_16)) {

			/*
			 * When the CC bit is SET by the host in ATA pass thru CDB, driver is supposed to return DID_OK
			 * When the CC bit is RESET by the host, driver should return DID_ERROR
			 */
			if(scsicmd->cmnd[2] & (0x01 << 5)) {
				scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
					break;
			}
			else {
				scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
				break;
			}
		}
		else {
			scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		}
	}
	/* If the device raised CHECK CONDITION, pass its sense data up to
	 * the midlayer (bounded by the kernel sense buffer size). */
	if (le32_to_cpu(srbreply->scsi_status) == SAM_STAT_CHECK_CONDITION) {
		int len;
		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
			    SCSI_SENSE_BUFFERSIZE);
#ifdef AAC_DETAILED_STATUS_INFO
		printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
					le32_to_cpu(srbreply->status), len);
#endif
		memcpy(scsicmd->sense_buffer, srbreply->sense_data, len);
	}
	/*
	 * OR in the scsi status (already shifted up a bit)
	 */
	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);

	/* Return the fib to the pool, then complete the SCSI command. */
	aac_fib_complete(fibptr);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	aac_fib_free_tag(fibptr);
#else
	aac_fib_free(fibptr);
#endif

#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
	aac_io_done(scsicmd);
#else
	scsicmd->scsi_done(scsicmd);
#endif
}
5940
5941/**
5942 *
5943 * aac_hba_callback
5944 * @context: the context set in the fib - here it is scsi cmd
5945 * @fibptr: pointer to the fib
5946 *
5947 * Handles the completion of a native HBA scsi command
5948 *
5949 */
5950
5951void aac_hba_callback(void *context, struct fib * fibptr)
5952{
5953 struct aac_dev *dev;
5954 struct scsi_cmnd *scsicmd;
5955
5956 scsicmd = (struct scsi_cmnd *) context;
5957
5958 if (!aac_valid_context(scsicmd, fibptr))
5959 return;
5960
5961 BUG_ON(fibptr == NULL);
5962 dev = fibptr->dev;
5963
5964 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)) {
5965#if (!defined(__VMKLNX30__) || defined(__x86_64__))
5966 scsi_dma_unmap(scsicmd);
5967#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
5968 if(!scsi_sg_count(scsicmd) && scsi_bufflen(scsicmd))
5969#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
5970 pci_unmap_single(dev->pdev, (dma_addr_t)
5971 scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
5972#else
5973 pci_unmap_single(dev->pdev,
5974 scsicmd->SCp.dma_handle, scsicmd->request_bufflen,
5975#endif
5976 scsicmd->sc_data_direction);
5977#endif
5978#endif
5979 }
5980
5981 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
5982 /* fast response */
5983 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
5984 } else {
5985 struct aac_hba_resp *err =
5986 &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
5987
5988 // BUG_ON(err->iu_type != HBA_IU_TYPE_RESP);
5989 if (err->service_response == HBA_RESP_SVCRES_TASK_COMPLETE) {
5990 scsicmd->result = err->status;
5991 /* set residual count */
5992 scsi_set_resid(scsicmd,
5993 le32_to_cpu(err->residual_count));
5994
5995 switch (err->status) {
5996 case SAM_STAT_GOOD:
5997 scsicmd->result |= DID_OK << 16 |
5998 COMMAND_COMPLETE << 8;
5999 break;
6000 case SAM_STAT_CHECK_CONDITION:
6001 {
6002 int len;
6003
6004 len = min_t(u8, err->sense_response_data_len,
6005 SCSI_SENSE_BUFFERSIZE);
6006 if (len)
6007 memcpy(scsicmd->sense_buffer,
6008 err->sense_response_buf, len);
6009 scsicmd->result |= DID_OK << 16 |
6010 COMMAND_COMPLETE << 8;
6011 break;
6012 }
6013 case SAM_STAT_BUSY:
6014#if ((LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)) || defined(DID_BUS_BUSY))
6015 scsicmd->result |= DID_BUS_BUSY << 16 |
6016 COMMAND_COMPLETE << 8;
6017#else
6018 scsicmd->result |= DID_NO_CONNECT << 16 |
6019 COMMAND_COMPLETE << 8;
6020#endif
6021 case SAM_STAT_TASK_ABORTED:
6022 scsicmd->result |= DID_ABORT << 16 |
6023 ABORT << 8;
6024 break;
6025 case SAM_STAT_RESERVATION_CONFLICT:
6026 case SAM_STAT_TASK_SET_FULL:
6027 default:
6028 scsicmd->result |= DID_ERROR << 16 |
6029 COMMAND_COMPLETE << 8;
6030 break;
6031 }
6032 } else if (err->service_response == HBA_RESP_SVCRES_FAILURE) {
6033 switch (err->status) {
6034 case HBA_RESP_STAT_HBAMODE_DISABLED:
6035 {
6036 u32 bus, cid;
6037
6038 bus =aac_logical_to_phys(scmd_channel(scsicmd));
6039 cid =scmd_id(scsicmd);
6040 if (dev->hba_map[bus][cid].devtype ==
6041 AAC_DEVTYPE_NATIVE_RAW) {
6042 dev->hba_map[bus][cid].devtype =
6043 AAC_DEVTYPE_ARC_RAW;
6044 dev->hba_map[bus][cid].rmw_nexus =
6045 0xffffffff;
6046 }
6047 scsicmd->result = DID_NO_CONNECT << 16 |
6048 COMMAND_COMPLETE << 8;
6049 break;
6050 }
6051 case HBA_RESP_STAT_IO_ERROR:
6052 case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
6053 scsicmd->result = DID_OK << 16 |
6054 COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
6055 break;
6056 case HBA_RESP_STAT_IO_ABORTED:
6057 scsicmd->result = DID_ABORT << 16 |
6058 ABORT << 8;
6059 break;
6060 case HBA_RESP_STAT_INVALID_DEVICE:
6061 scsicmd->result = DID_NO_CONNECT << 16 |
6062 COMMAND_COMPLETE << 8;
6063 break;
6064 case HBA_RESP_STAT_UNDERRUN:
6065 /* UNDERRUN is OK */
6066 scsicmd->result = DID_OK << 16 |
6067 COMMAND_COMPLETE << 8;
6068 break;
6069 case HBA_RESP_STAT_OVERRUN:
6070 default:
6071 scsicmd->result = DID_ERROR << 16 |
6072 COMMAND_COMPLETE << 8;
6073 break;
6074 }
6075 } else if (err->service_response ==
6076 HBA_RESP_SVCRES_TMF_REJECTED) {
6077 scsicmd->result =
6078 DID_ERROR << 16 | MESSAGE_REJECT << 8;
6079 } else if (err->service_response ==
6080 HBA_RESP_SVCRES_TMF_LUN_INVALID) {
6081 scsicmd->result =
6082 DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
6083 } else if ((err->service_response ==
6084 HBA_RESP_SVCRES_TMF_COMPLETE) ||
6085 (err->service_response ==
6086 HBA_RESP_SVCRES_TMF_SUCCEEDED)) {
6087 scsicmd->result =
6088 DID_OK << 16 | COMMAND_COMPLETE << 8;
6089 } else {
6090 scsicmd->result =
6091 DID_ERROR << 16 | COMMAND_COMPLETE << 8;
6092 }
6093 }
6094
6095 aac_fib_complete(fibptr);
6096#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
6097 aac_fib_free_tag(fibptr);
6098#else
6099 aac_fib_free(fibptr);
6100#endif
6101
6102 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
6103 scsicmd->SCp.sent_command = 1;
6104 } else {
6105#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
6106 aac_io_done(scsicmd);
6107#else
6108 scsicmd->scsi_done(scsicmd);
6109#endif
6110 }
6111}
6112
6113/**
6114 *
6115 * aac_send_scb_fib
6116 * @scsicmd: the scsi command block
6117 *
6118 * This routine will form a FIB and fill in the aac_srb from the
6119 * scsicmd passed in.
6120 */
6121
6122static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
6123{
6124 struct fib* cmd_fibcontext;
6125 struct aac_dev* dev;
6126 int status;
6127
6128 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
6129 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
6130 scsicmd->device->lun > AAC_MAX_LUN - 1) {
6131 scsicmd->result = DID_NO_CONNECT << 16;
6132#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
6133 __aac_io_done(scsicmd);
6134#else
6135 scsicmd->scsi_done(scsicmd);
6136#endif
6137 return 0;
6138 }
6139
6140 /*
6141 * Allocate and initialize a Fib then setup a BlockWrite command
6142 */
6143#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
6144 if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd))) {
6145#else
6146 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
6147#endif
6148 return -1;
6149 }
6150
6151 status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
6152
6153 /*
6154 * Check that the command queued to the controller
6155 */
6156 if (status == -EINPROGRESS) {
6157 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
6158 return 0;
6159 }
6160
6161 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
6162 aac_fib_complete(cmd_fibcontext);
6163 aac_fib_free(cmd_fibcontext);
6164
6165 return -1;
6166}
6167
6168/**
6169 *
6170 * aac_send_hba_fib
6171 * @scsicmd: the scsi command block
6172 *
6173 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
6174 * scsicmd passed in.
6175 */
6176
6177static int aac_send_hba_fib(struct scsi_cmnd* scsicmd)
6178{
6179 struct fib* cmd_fibcontext;
6180 struct aac_dev* dev;
6181 int status;
6182
6183 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
6184 if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
6185 scsicmd->device->lun > AAC_MAX_LUN - 1) {
6186 scsicmd->result = DID_NO_CONNECT << 16;
6187#if (defined(AAC_DEBUG_INSTRUMENT_TIMING) || defined(AAC_DEBUG_INSTRUMENT_CONTEXT))
6188 __aac_io_done(scsicmd);
6189#else
6190 scsicmd->scsi_done(scsicmd);
6191#endif
6192 return 0;
6193 }
6194
6195 /*
6196 * Allocate and initialize a Fib then setup a BlockWrite command
6197 */
6198#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
6199 if (!(cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd))) {
6200#else
6201 if (!(cmd_fibcontext = aac_fib_alloc(dev))) {
6202#endif
6203 return -1;
6204 }
6205
6206 status = aac_adapter_hba(cmd_fibcontext, scsicmd);
6207
6208 /*
6209 * Check that the command queued to the controller
6210 */
6211 if (status == -EINPROGRESS) {
6212 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
6213 return 0;
6214 }
6215
6216 printk(KERN_WARNING "aac_hba_cmd_req: aac_fib_send failed with status: %d\n", status);
6217 aac_fib_complete(cmd_fibcontext);
6218 aac_fib_free(cmd_fibcontext);
6219
6220 return -1;
6221}
6222
/*
 * aac_build_sg - build a 32-bit scatter/gather map for a command
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @psg:     adapter sgmap to populate (32-bit element addresses)
 *
 * Maps the command for DMA and converts the resulting scatterlist into
 * the adapter's sgmap layout, trimming the final element so the total
 * matches the midlayer's buffer length exactly.  Returns the mapped
 * byte count (on APRE builds, the element count), or the negative
 * value from scsi_dma_map() on failure.
 */
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
static int aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
#else
static long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
	struct Scsi_Host *host = scsicmd->device->host;
#endif
	struct aac_dev *dev;
	unsigned long byte_count = 0;
	int nseg;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
	dev = (struct aac_dev *)host->hostdata;
#else
	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
#endif
	// Get rid of old data
	psg->count = 0;
	psg->sg[0].addr = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sg;
		int i;
#if (defined(AAC_DEBUG_INSTRUMENT_SG) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)))
		int nseg_hold = nseg;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
		psg->count = cpu_to_le32(nseg);

#endif
		scsi_for_each_sg(scsicmd, sg, nseg, i) {
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || defined(AAC_DEBUG_INSTRUMENT_SG_PROBE) || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
			int count = sg_dma_len(sg);
#endif
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)) || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
			u32 addr = sg_dma_address(sg);
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
			char c = ((char *)sg->page + sg->offset)[0];
			c = ((char *)sg->page + sg->offset)[count-1];
#endif
#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__))
			vmk_verify_memory_for_io(addr, count);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
			/* Without AAC_OPT_NEW_COMM the firmware takes SG
			 * elements of at most 64KB each, so larger mapped
			 * elements are split into 64KB chunks here. */
			if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
			  while (count > 65536) {
				psg->sg[i].addr = cpu_to_le32(addr);
				psg->sg[i].count = cpu_to_le32(65536);
				++i;
				if (++nseg > host->sg_tablesize) {
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
					printk(KERN_INFO
					  "SG List[%d] too large based on original[%d]:\n",
					  nseg, nseg_hold);
					sg = (struct scatterlist *) scsicmd->request_buffer;
					for (i = 0; i < nseg_hold; i++) {
						printk(KERN_INFO "0x%llx[%d] ",
						  (u64)(sg_dma_address(sg)),
						  (int)(sg_dma_len(sg)));
						++sg;
					}
					printk(KERN_INFO "...\n");
#endif
					BUG();
				}
				byte_count += 65536;
				addr += 65536;
				count -= 65536;
			  }

			psg->sg[i].addr = cpu_to_le32(addr);
			psg->sg[i].count = cpu_to_le32(count);
			byte_count += count;
#else
#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
			psg->sg[i].addr = cpu_to_le32(addr);
#else
			psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE) || defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__) || defined(__VMKLNX__))
			psg->sg[i].count = cpu_to_le32(count);
#else
			psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
#endif
			byte_count += sg_dma_len(sg);
#endif
		}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		psg->count = cpu_to_le32(nseg);
#endif
		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsi_bufflen(scsicmd));
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	/* Legacy single-buffer path: no scatterlist, map the flat
	 * request buffer with pci_map_single() instead. */
	else if(scsicmd->request_bufflen) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		int i, count;
#endif
		u32 addr;
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
		char c = ((char *)scsicmd->request_buffer)[0];
		c = ((char *)scsicmd->request_buffer)[scsicmd->request_bufflen-1];
#endif
#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
		scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
		vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
#else
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
#else
		scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
#endif
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		addr = (u32)(uintptr_t)scsicmd->SCp.dma_handle;
#else
		addr = scsicmd->SCp.dma_handle;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		count = scsicmd->request_bufflen;
		i = 0;
		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
		  while (count > 65536) {
			psg->sg[i].addr = cpu_to_le32(addr);
			psg->sg[i].count = cpu_to_le32(65536);
			if (++i >= host->sg_tablesize) {
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
				printk(KERN_INFO
				  "SG List[%d] too large based on original single element %d in size\n",
				  i, scsicmd->request_bufflen);
#endif
				BUG();
			}
			addr += 65536;
			count -= 65536;
		  }
		psg->count = cpu_to_le32(1+i);
		psg->sg[i].addr = cpu_to_le32(addr);
		psg->sg[i].count = cpu_to_le32(count);
#else
		psg->count = cpu_to_le32(1);
		psg->sg[0].addr = cpu_to_le32(addr);
		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
#endif
		byte_count = scsicmd->request_bufflen;
	}
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
{
	int i, nseg = le32_to_cpu(psg->count);
	printk("aac_build_sg:");
	for (i = 0; i < nseg; i++) {
		int count = le32_to_cpu(psg->sg[i].count);
		u32 addr = le32_to_cpu(psg->sg[i].addr);
		printk(" %x[%d]", addr, count);
	}
	printk ("\n");
}
#endif

	adbg_debug_aac_config(scsicmd, psg->count, byte_count);

#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	return le32_to_cpu(psg->count);
#else
	return byte_count;
#endif
}
6412
6413
/*
 * aac_build_sg64 - build a 64-bit scatter/gather map for a command
 * @scsicmd: command whose data buffer is to be DMA-mapped
 * @psg:     adapter sgmap64 to populate (addr[0]=low32, addr[1]=high32)
 *
 * 64-bit variant of aac_build_sg(): maps the command for DMA and fills
 * @psg with split high/low address words, trimming the final element so
 * the total matches the midlayer's buffer length exactly.  Returns the
 * mapped byte count (on APRE builds, the element count), or the
 * negative value from scsi_dma_map() on failure.
 */
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
static int aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
#else
static long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
	struct Scsi_Host *host = scsicmd->device->host;
#endif
	struct aac_dev *dev;
	unsigned long byte_count = 0;
	u64 addr;
	int nseg;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
	dev = (struct aac_dev *)host->hostdata;
#else
	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
#endif
	// Get rid of old data
	psg->count = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;
	if (nseg) {
		struct scatterlist *sg;
		int i;
#if (defined(AAC_DEBUG_INSTRUMENT_SG) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)))
		int nseg_hold = nseg;
#endif

		scsi_for_each_sg(scsicmd, sg, nseg, i) {
			int count = sg_dma_len(sg);
			addr = sg_dma_address(sg);
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
			char c = ((char *)sg->page + sg->offset)[0];
			c = ((char *)sg->page + sg->offset)[count-1];
#endif
#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__))
			vmk_verify_memory_for_io(addr, count);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
			/* Without AAC_OPT_NEW_COMM the firmware takes SG
			 * elements of at most 64KB each, so larger mapped
			 * elements are split into 64KB chunks here. */
			if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
			  while (count > 65536) {
				psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
				psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
				psg->sg[i].count = cpu_to_le32(65536);
				++i;
				if (++nseg > host->sg_tablesize) {
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
					printk(KERN_INFO
					  "SG List[%d] too large based on original[%d]:\n",
					  nseg, nseg_hold);
					sg = (struct scatterlist *) scsicmd->request_buffer;
					for (i = 0; i < nseg_hold; i++) {
						printk(KERN_INFO "0x%llx[%d] ",
						  (u64)sg_dma_address(sg),
						  (int)sg_dma_len(sg));
						++sg;
					}
					printk(KERN_INFO "...\n");
#endif
					BUG();
				}
				byte_count += 65536;
				addr += 65536;
				count -= 65536;
			  }
#endif
			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
			psg->sg[i].count = cpu_to_le32(count);
			byte_count += count;
		}
		psg->count = cpu_to_le32(nseg);
		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsi_bufflen(scsicmd));
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	/* Legacy single-buffer path: no scatterlist, map the flat
	 * request buffer with pci_map_single() instead. */
	else if(scsicmd->request_bufflen) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		int i;
		int count;
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
		char c = ((char *)scsicmd->request_buffer)[0];
		c = ((char *)scsicmd->request_buffer)[scsicmd->request_bufflen-1];
#endif
#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
		scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
		vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
#else
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
#else
		scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
#endif
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		addr = (u64)(uintptr_t)scsicmd->SCp.dma_handle;
#else
		addr = scsicmd->SCp.dma_handle;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
		count = scsicmd->request_bufflen;
		i = 0;
		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
		  while (count > 65536) {
			psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
			psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
			psg->sg[i].count = cpu_to_le32(65536);
			if (++i >= host->sg_tablesize) {
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
				printk(KERN_INFO
				  "SG List[%d] too large based on original single element %d in size\n",
				  i, scsicmd->request_bufflen);
#endif
				BUG();
			}
			addr += 65536;
			count -= 65536;
		  }
		psg->count = cpu_to_le32(1+i);
		psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
		psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
		psg->sg[i].count = cpu_to_le32(count);
#else
		psg->count = cpu_to_le32(1);
		psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
		psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
		psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
#endif
		byte_count = scsicmd->request_bufflen;
	}
#endif
#if (defined(AAC_DEBUG_INSTRUMENT_SG))
{
	int i, nseg = le32_to_cpu(psg->count);
	printk("aac_build_sg64:");
	for (i = 0; i < nseg; i++) {
		int count = le32_to_cpu(psg->sg[i].count);
		u32 addr0 = le32_to_cpu(psg->sg[i].addr[0]);
		u32 addr1 = le32_to_cpu(psg->sg[i].addr[1]);
		if (addr1 == 0)
			printk(" %x[%d]", addr0, count);
		else
			printk(" %x%08x[%d]", addr1, addr0, count);
	}
	printk ("\n");
}
#endif

	adbg_debug_aac_config(scsicmd, psg->count, byte_count);

#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	return le32_to_cpu(psg->count);
#else
	return byte_count;
#endif
}
6591
/*
 * aac_build_sgraw - build a "raw" scatter/gather list for @scsicmd into @psg.
 *
 * Maps the command's data buffer for DMA and fills psg->sg[] with sgmapraw
 * elements (next/prev/addr[2]/count/flags, addresses stored little-endian).
 * Returns the total number of bytes mapped (APRE configuration: the element
 * count instead), or the negative error from scsi_dma_map() on failure.
 */
#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
static int aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
#else
static long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
#endif
{
	unsigned long byte_count = 0;
	int nseg;

	// Get rid of old data
	psg->count = 0;
	psg->sg[0].next = 0;
	psg->sg[0].prev = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;
	psg->sg[0].flags = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg < 0)
		return nseg;	/* DMA mapping failed */
	if (nseg) {
		struct scatterlist *sg;
		int i;

		scsi_for_each_sg(scsicmd, sg, nseg, i) {
			int count = sg_dma_len(sg);
			u64 addr = sg_dma_address(sg);
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
			/* Touch first and last mapped byte to catch bad mappings early */
			char c = ((char *)sg->page + sg->offset)[0];
			c = ((char *)sg->page + sg->offset)[count-1];
#endif
			psg->sg[i].next = 0;
			psg->sg[i].prev = 0;
			psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
			psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
			psg->sg[i].count = cpu_to_le32(count);
			psg->sg[i].flags = 0;
			byte_count += count;
		}
		psg->count = cpu_to_le32(nseg);
		/* hba wants the size to be exact */
		if (byte_count > scsi_bufflen(scsicmd)) {
			/* Trim the final element so the list length matches the request */
			u32 temp = le32_to_cpu(psg->sg[i-1].count) -
				(byte_count - scsi_bufflen(scsicmd));
			psg->sg[i-1].count = cpu_to_le32(temp);
			byte_count = scsi_bufflen(scsicmd);
		}
		/* Check for command underflow */
		if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
			printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
					byte_count, scsicmd->underflow);
		}
	}
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
	/* Pre-2.6.23 kernels: no scatterlist means one flat buffer
	 * (request_buffer/request_bufflen) that must be mapped by hand. */
	else if(scsicmd->request_bufflen) {
#if ((!defined(__VMKERNEL_MODULE__) && !defined(__VMKLNX30__)) || defined(__VMKLNX__))
		struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
#endif
		int count;
		u64 addr;
#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
		char c = ((char *)scsicmd->request_buffer)[0];
		c = ((char *)scsicmd->request_buffer)[scsicmd->request_bufflen-1];
#endif
#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
		/* VMware classic vmklinux: buffer already machine-addressed */
		scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
		vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
#else
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
#else
		scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
#endif
				scsicmd->request_buffer,
				scsicmd->request_bufflen,
				scsicmd->sc_data_direction);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
		addr = (u64)(uintptr_t)scsicmd->SCp.dma_handle;
#else
		addr = scsicmd->SCp.dma_handle;
#endif
		count = scsicmd->request_bufflen;
		/* Single-element list covering the whole flat buffer */
		psg->count = cpu_to_le32(1);
		psg->sg[0].next = 0;
		psg->sg[0].prev = 0;
		psg->sg[0].addr[1] = cpu_to_le32((u32)(addr>>32));
		psg->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
		psg->sg[0].count = cpu_to_le32(count);
		psg->sg[0].flags = 0;
		byte_count = scsicmd->request_bufflen;
	}
#endif

	adbg_debug_aac_config(scsicmd, psg->count, byte_count);

#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
	return le32_to_cpu(psg->count);
#else
	return byte_count;
#endif
}
6695
6696#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
6697static int aac_build_sgraw2(struct scsi_cmnd* scsicmd, struct aac_raw_io2* rio2, int sg_max)
6698#else
6699static long aac_build_sgraw2(struct scsi_cmnd* scsicmd, struct aac_raw_io2* rio2, int sg_max)
6700#endif
6701{
6702 unsigned long byte_count = 0;
6703 int nseg;
6704
6705 nseg = scsi_dma_map(scsicmd);
6706 if (nseg < 0)
6707 return nseg;
6708 if (nseg) {
6709 struct scatterlist *sg;
6710 int i, conformable = 0;
6711 u32 min_size = PAGE_SIZE, cur_size;
6712
6713 scsi_for_each_sg(scsicmd, sg, nseg, i) {
6714 int count = sg_dma_len(sg);
6715 u64 addr = sg_dma_address(sg);
6716#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
6717 char c = ((char *)sg->page + sg->offset)[0];
6718 c = ((char *)sg->page + sg->offset)[count-1];
6719#endif
6720 BUG_ON(i >= sg_max);
6721 rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
6722 rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
6723 cur_size = cpu_to_le32(count);
6724 rio2->sge[i].length = cur_size;
6725 rio2->sge[i].flags = 0;
6726 if (i == 0) {
6727 conformable = 1;
6728 rio2->sgeFirstSize = cur_size;
6729 } else if (i == 1) {
6730 rio2->sgeNominalSize = cur_size;
6731 min_size = cur_size;
6732 } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
6733 conformable = 0;
6734 if (cur_size < min_size)
6735 min_size = cur_size;
6736 }
6737 byte_count += count;
6738 }
6739
6740 /* hba wants the size to be exact */
6741 if (byte_count > scsi_bufflen(scsicmd)) {
6742 u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
6743 (byte_count - scsi_bufflen(scsicmd));
6744 rio2->sge[i-1].length = cpu_to_le32(temp);
6745 byte_count = scsi_bufflen(scsicmd);
6746 }
6747
6748 rio2->sgeCnt = cpu_to_le32(nseg);
6749 rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
6750 /* not conformable: evaluate required sg elements */
6751 if (!conformable) {
6752 int j, nseg_new = nseg, err_found;
6753 for (i = min_size / PAGE_SIZE; i >= 1; --i) {
6754 err_found = 0;
6755 nseg_new = 2;
6756 for (j = 1; j < nseg - 1; ++j) {
6757 if (rio2->sge[j].length % (i*PAGE_SIZE)) {
6758 err_found = 1;
6759 break;
6760 }
6761 nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
6762 }
6763 if (!err_found)
6764 break;
6765 }
6766 if (i > 0 && nseg_new <= sg_max)
6767 aac_convert_sgraw2(rio2, i, nseg, nseg_new);
6768 } else
6769 rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
6770
6771 /* Check for command underflow */
6772 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
6773 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
6774 byte_count, scsicmd->underflow);
6775 }
6776 }
6777#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
6778 else if(scsicmd->request_bufflen) {
6779#if ((!defined(__VMKERNEL_MODULE__) && !defined(__VMKLNX30__)) || defined(__VMKLNX__))
6780 struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
6781#endif
6782 int count;
6783 u64 addr;
6784#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
6785 char c = ((char *)scsicmd->request_buffer)[0];
6786 c = ((char *)scsicmd->request_buffer)[scsicmd->request_bufflen-1];
6787#endif
6788#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
6789 scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
6790 vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
6791#else
6792#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
6793 scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
6794#else
6795 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
6796#endif
6797 scsicmd->request_buffer,
6798 scsicmd->request_bufflen,
6799 scsicmd->sc_data_direction);
6800#endif
6801#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
6802 addr = (u64)(uintptr_t)scsicmd->SCp.dma_handle;
6803#else
6804 addr = scsicmd->SCp.dma_handle;
6805#endif
6806 count = scsicmd->request_bufflen;
6807 rio2->sgeCnt = cpu_to_le32(1);
6808 rio2->sge[0].addrHigh = cpu_to_le32((u32)(addr>>32));
6809 rio2->sge[0].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
6810 rio2->sge[0].length = cpu_to_le32(count);
6811 rio2->sge[0].flags = 0;
6812 rio2->sgeFirstSize = cpu_to_le32(count);
6813 rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT|RIO2_SG_FORMAT_IEEE1212);
6814 byte_count = scsicmd->request_bufflen;
6815 }
6816#endif
6817
6818 adbg_debug_aac_config(scsicmd, rio2->byteCount, byte_count);
6819
6820#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
6821 return le32_to_cpu(psg->count);
6822#else
6823 return byte_count;
6824#endif
6825}
6826
/*
 * aac_convert_sgraw2 - rewrite a non-conformant raw2 SG list in place so
 * that every middle element is exactly @pages pages long.
 *
 * @rio2:     request whose sge[] array is rewritten (element 0 and the last
 *            element are preserved; middle elements are split into chunks)
 * @pages:    chunk size in pages; caller guarantees it evenly divides every
 *            middle element's length
 * @nseg:     current number of elements
 * @nseg_new: number of elements after splitting (caller-computed)
 *
 * Returns 0 on success or when conversion is disabled, -1 if the scratch
 * allocation fails (list left untouched).
 */
static int aac_convert_sgraw2(struct aac_raw_io2* rio2, int pages, int nseg, int nseg_new)
{
	struct sge_ieee1212 *sge;
	int i, j, pos;
	u32 addr_low;

	/* Conversion can be disabled via the aac_convert_sgl module knob */
	if (aac_convert_sgl == 0)
		return 0;

	/* Scratch copy of the rebuilt list; GFP_ATOMIC: called on the I/O path */
	sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
	if (sge == NULL)
		return -1;

	/* Split each middle element into (length / (pages*PAGE_SIZE)) chunks.
	 * NOTE(review): addrLow/length are __le32 but the arithmetic here is
	 * host-order; correct only on little-endian hosts -- confirm. */
	for (i = 1, pos = 1; i < nseg-1; ++i) {
		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
			addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
			sge[pos].addrLow = addr_low;
			sge[pos].addrHigh = rio2->sge[i].addrHigh;
			/* 32-bit wrap -> carry into the high address word */
			if (addr_low < rio2->sge[i].addrLow)
				sge[pos].addrHigh++;
			sge[pos].length = pages * PAGE_SIZE;
			sge[pos].flags = 0;
			pos++;
		}
	}
	/* Last element passes through unchanged */
	sge[pos] = rio2->sge[nseg-1];
	/* Element 0 stays in place; copy the rebuilt tail over the old list */
	memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));

	kfree(sge);
	rio2->sgeCnt = cpu_to_le32(nseg_new);
	rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
	rio2->sgeNominalSize = pages * PAGE_SIZE;
	return 0;
}
6861
6862#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
6863static int aac_build_sghba(struct scsi_cmnd* scsicmd, struct aac_hba_cmd_req * hbacmd, int sg_max, u64 sg_address)
6864#else
6865static long aac_build_sghba(struct scsi_cmnd* scsicmd, struct aac_hba_cmd_req * hbacmd, int sg_max, u64 sg_address)
6866#endif
6867{
6868 unsigned long byte_count = 0;
6869 int nseg;
6870
6871 nseg = scsi_dma_map(scsicmd);
6872 if (nseg < 0)
6873 return nseg;
6874 if (nseg) {
6875 struct scatterlist *sg;
6876 int i;
6877 u32 cur_size;
6878 struct aac_hba_sgl *sge;
6879
6880 if (nseg > HBA_MAX_SG_EMBEDDED)
6881 sge = &hbacmd->sge[2];
6882 else
6883 sge = &hbacmd->sge[0];
6884 scsi_for_each_sg(scsicmd, sg, nseg, i) {
6885 int count = sg_dma_len(sg);
6886 u64 addr = sg_dma_address(sg);
6887#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
6888 char c = ((char *)sg->page + sg->offset)[0];
6889 c = ((char *)sg->page + sg->offset)[count-1];
6890#endif
6891 BUG_ON(i >= sg_max);
6892 sge->addr_hi = cpu_to_le32((u32)(addr>>32));
6893 sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
6894 cur_size = cpu_to_le32(count);
6895 sge->len = cur_size;
6896 sge->flags = 0;
6897 byte_count += count;
6898 sge++;
6899 }
6900
6901 sge--;
6902 /* hba wants the size to be exact */
6903 if (byte_count > scsi_bufflen(scsicmd)) {
6904 u32 temp = le32_to_cpu(sge->len) -
6905 (byte_count - scsi_bufflen(scsicmd));
6906 sge->len = cpu_to_le32(temp);
6907 byte_count = scsi_bufflen(scsicmd);
6908 }
6909
6910 if (nseg <= HBA_MAX_SG_EMBEDDED) {
6911 hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
6912 sge->flags = cpu_to_le32(0x40000000);
6913 } else {
6914 /* not embedded */
6915 hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
6916 hbacmd->emb_data_desc_count = cpu_to_le32(1);
6917 hbacmd->sge[0].addr_hi =
6918 cpu_to_le32((u32)(sg_address >> 32));
6919 hbacmd->sge[0].addr_lo =
6920 cpu_to_le32((u32)(sg_address & 0xffffffff));
6921 }
6922
6923 /* Check for command underflow */
6924 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
6925 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
6926 byte_count, scsicmd->underflow);
6927 }
6928 }
6929#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
6930 else if(scsicmd->request_bufflen) {
6931#if ((!defined(__VMKERNEL_MODULE__) && !defined(__VMKLNX30__)) || defined(__VMKLNX__))
6932 struct aac_dev *dev = (struct aac_dev *)scsicmd->device->host->hostdata;
6933#endif
6934 int count;
6935 u64 addr;
6936#if (defined(AAC_DEBUG_INSTRUMENT_SG_PROBE))
6937 char c = ((char *)scsicmd->request_buffer)[0];
6938 c = ((char *)scsicmd->request_buffer)[scsicmd->request_bufflen-1];
6939#endif
6940#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
6941 scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
6942 vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
6943#else
6944#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
6945 scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
6946#else
6947 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
6948#endif
6949 scsicmd->request_buffer,
6950 scsicmd->request_bufflen,
6951 scsicmd->sc_data_direction);
6952#endif
6953#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
6954 addr = (u64)(uintptr_t)scsicmd->SCp.dma_handle;
6955#else
6956 addr = scsicmd->SCp.dma_handle;
6957#endif
6958 count = scsicmd->request_bufflen;
6959 hbacmd->emb_data_desc_count = cpu_to_le32(1);
6960 hbacmd->sge[0].addr_hi = cpu_to_le32((u32)(addr>>32));
6961 hbacmd->sge[0].addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
6962 hbacmd->sge[0].len = cpu_to_le32(count);
6963 hbacmd->sge[0].flags = cpu_to_le32(0x40000000);
6964 byte_count = scsicmd->request_bufflen;
6965 }
6966#endif
6967
6968 adbg_debug_aac_config(scsicmd, hbacmd->data_length, byte_count);
6969
6970#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
6971 return le32_to_cpu(psg->count);
6972#else
6973 return byte_count;
6974#endif
6975}
6976
6977#if (defined(INITFLAGS_APRE_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
6978
6979int aac_build_sg_nark(struct fib * fib, struct scsi_cmnd* scsicmd, union aac_apre_embedded_sglist* psg)
6980{
6981 struct aac_dev *dev = fib->dev;
6982 struct aac_apre_element_nark * sglist = &(psg->nark.FirstElement);
6983 unsigned long byte_count = 0;
6984
6985 int nseg = scsi_dma_map(scsicmd);
6986 BUG_ON(nseg < 0);
6987
6988 if (nseg) {
6989 struct scatterlist *sg;
6990 int i;
6991
6992 scsi_for_each_sg(scsicmd, sg, nseg, i) {
6993 int count = sg_dma_len(sg);
6994 u64 addr;
6995 if (i == (sizeof(psg->nark)/sizeof(*sglist))) {
6996 int Index = fib->hw_fib_va - dev->hw_fib_va;
6997 struct hw_apre * frame = (struct hw_apre *)&dev->hw_fib_va[Index];
6998 sglist = (struct aac_apre_element_nark *)frame->sg;
6999 *((union aac_apre_embedded_sglist *)frame->sg) = *psg;
7000 addr = fib->hw_fib_pa + offsetof(struct hw_apre, sg[0]);
7001 psg->nark.FirstElement.physAddrLow = cpu_to_le32((u32)(addr & 0xffffffff));
7002 psg->nark.FirstElement.physAddrHigh = cpu_to_le32((u32)(addr>>32));
7003 psg->nark.FirstElement.elementByteCount = cpu_to_le32(sg_count * sizeof(psg->nark.FirstElement));
7004 psg->nark.FirstElement.elementType = APRE_BUFFER_DESC_POINTER;
7005 }
7006 addr = sg_dma_address(sg);
7007 sglist->physAddrLow = cpu_to_le32((u32)(addr & 0xffffffff));
7008 sglist->physAddrHigh = cpu_to_le32((u32)(addr>>32));
7009 sglist->elementByteCount = cpu_to_le32(count);
7010 sglist->domainSelect = APRE_OFF_CHIP_MEM_DOMAIN;
7011 sglist->elementType = APRE_BUFFER_DESC_ENTRY;
7012 byte_count += count;
7013 sglist++;
7014 }
7015#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
7016 } else if (scsicmd->request_bufflen) {
7017 int count;
7018 u64 addr;
7019#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
7020 scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
7021 vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
7022#else
7023 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
7024 scsicmd->request_buffer,
7025 scsicmd->request_bufflen,
7026 scsicmd->sc_data_direction);
7027#endif
7028 addr = scsicmd->SCp.dma_handle;
7029 count = scsicmd->request_bufflen;
7030 byte_count = count;
7031 sg_count = 1;
7032 sglist->physAddrLow = cpu_to_le32((u32)(addr & 0xffffffff));
7033 sglist->physAddrHigh = cpu_to_le32((u32)(addr>>32));
7034 sglist->elementByteCount = cpu_to_le32(count);
7035 sglist->domainSelect = APRE_OFF_CHIP_MEM_DOMAIN;
7036 sglist->elementType = APRE_BUFFER_DESC_ENTRY;
7037 }
7038#endif
7039
7040 adbg_debug_aac_config(scsicmd, sg_count, byte_count);
7041
7042 return sg_count;
7043}
7044
/*
 * aac_build_sg_rx - APRE scatter/gather builder for Rx-class adapters.
 * Stub: builds nothing and reports zero SG elements.
 */
int aac_build_sg_rx(struct fib * fib, struct scsi_cmnd* scsicmd, union aac_apre_embedded_sglist* psg)
{
	return 0;
}
7049#endif
7050#if (defined(INITFLAGS_BEDROC_SUPPORTED) && !defined(CONFIG_COMMUNITY_KERNEL))
7051int aac_build_sg_bedroc(struct fib * fib, struct scsi_cmnd* scsicmd, struct sgmap64* psg)
7052{
7053 struct SGformat {
7054 __le32 TotalSize:12;
7055 __le32 PageSize:4; /* Page size in Power of two times 512 */
7056 __le32 MaximumEntries:8;
7057 __le32 AddressBE:1;
7058 __le32 LengthBE:1;
7059 __le32 EndBitEnable:1;
7060 __le32 EndBitPre:1; /* place end bit in 2nd to last
7061 entry for pipelines, rather than
7062 in last entry */
7063 __le32 LinkBitEnable:1;
7064 __le32 LinkBitPre:1; /* place end bit in 2nd to last entry */
7065 __le32 Version:2; /* Zero */
7066 __le32 AddressSize:12; /* Address SG field size in # bytes */
7067 __le32 AddressReserved:2;
7068 __le32 AddressBoundary:1: /* Address must be on page
7069 boundary (except first) */
7070 __le32 AddressPage:1; /* Address is page number */
7071 __le32 AddressOffset:16;/* Address SG field offset # bytes */
7072 __le32 LengthSize:12; /* Length SG field size in # bytes */
7073 __le32 LengthReserved:3;
7074 __le32 LengthBoundary:1; /* Length must not exceed page size */
7075 __le32 LengthPage:1; /* Length is number of pages */
7076 __le32 LengthOffset:16; /* Length SG field offset # bytes */
7077 __le31 EndBitLocationBit:3; /* End bit field with the byte */
7078 __le32 EndBitLocationOffset:12; /* End bit field offset in
7079 # of bytes, must be less
7080 less than TotalSize, upper
7081 bits above that ignored */
7082 __le32 EndBitSignedness:1; /* 0 indicates 1 is active */
7083 __le31 LinkBitLocationBit:3; /* Link bit field with the byte */
7084 __le32 LinkBitLocationOffset:12; /* Link bit field offset */
7085 __le32 LinkBitSignedness:1; /* 0 indicates 1 is active */
7086 };
7087
7088 dev->SGformat
7089#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
7090 struct Scsi_Host *host = scsicmd->device->host;
7091#endif
7092 struct aac_dev *dev;
7093 unsigned long byte_count = 0;
7094 u64 addr;
7095 int nseg;
7096
7097#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
7098 dev = (struct aac_dev *)host->hostdata;
7099#else
7100 dev = (struct aac_dev *)scsicmd->device->host->hostdata;
7101#endif
7102 // Get rid of old data
7103 psg->count = 0;
7104 memset(&psg->sg[0], 0, dev->SGformat.TotalSize);
7105 nseg = scsi_dma_map(scsicmd);
7106 BUG_ON(nseg < 0);
7107 if (nseg) {
7108 struct scatterlist *sg;
7109 int i;
7110#if (defined(AAC_DEBUG_INSTRUMENT_SG) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)))
7111 int nseg_hold = nseg;
7112#endif
7113
7114 scsi_for_each_sg(scsicmd, sg, nseg, i) {
7115 int count = sg_dma_len(sg);
7116 addr = sg_dma_address(sg);
7117#if (defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__))
7118 vmk_verify_memory_for_io(addr, count);
7119#endif
7120#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
7121 if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
7122 while (count > 65536) {
7123 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
7124 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
7125 psg->sg[i].count = cpu_to_le32(65536);
7126 ++i;
7127 if (++nseg > host->sg_tablesize) {
7128#if (defined(AAC_DEBUG_INSTRUMENT_SG))
7129 printk(KERN_INFO
7130 "SG List[%d] too large based on original[%d]:\n",
7131 nseg, nseg_hold);
7132 sg = (struct scatterlist *) scsicmd->request_buffer;
7133 for (i = 0; i < nseg_hold; i++) {
7134 printk(KERN_INFO "0x%llx[%d] ",
7135 (u64)sg_dma_address(sg),
7136 (int)sg_dma_len(sg));
7137 ++sg;
7138 }
7139 printk(KERN_INFO "...\n");
7140#endif
7141 BUG();
7142 }
7143 byte_count += 65536;
7144 addr += 65536;
7145 count -= 65536;
7146 }
7147#endif
7148 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
7149 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
7150 psg->sg[i].count = cpu_to_le32(count);
7151 byte_count += count;
7152 }
7153 psg->count = cpu_to_le32(nseg);
7154 /* hba wants the size to be exact */
7155 if(byte_count > scsi_bufflen(scsicmd)) {
7156 u32 temp = le32_to_cpu(psg->sg[i-1].count) -
7157 (byte_count - scsi_bufflen(scsicmd));
7158 psg->sg[i-1].count = cpu_to_le32(temp);
7159 byte_count = scsi_bufflen(scsicmd);
7160 }
7161 /* Check for command underflow */
7162 if(scsicmd->underflow && (byte_count < scsicmd->underflow)){
7163 printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
7164 byte_count, scsicmd->underflow);
7165 }
7166 }
7167#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22))
7168 else if(scsicmd->request_bufflen) {
7169#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
7170 int i;
7171 int count;
7172#endif
7173#if ((defined(__VMKERNEL_MODULE__) || defined(__VMKLNX30__)) && !defined(__x86_64__))
7174 scsicmd->SCp.dma_handle = scsicmd->request_bufferMA;
7175 vmk_verify_memory_for_io(scsicmd->request_bufferMA, scsicmd->request_bufflen);
7176#else
7177#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
7178 scsicmd->SCp.dma_handle = (char *)(uintptr_t)pci_map_single(dev->pdev,
7179#else
7180 scsicmd->SCp.dma_handle = pci_map_single(dev->pdev,
7181#endif
7182 scsicmd->request_buffer,
7183 scsicmd->request_bufflen,
7184 scsicmd->sc_data_direction);
7185#endif
7186#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,9)) && (LINUX_VERSION_CODE != KERNEL_VERSION(2,4,13))
7187 addr = (u64)(uintptr_t)scsicmd->SCp.dma_handle;
7188#else
7189 addr = scsicmd->SCp.dma_handle;
7190#endif
7191#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
7192 count = scsicmd->request_bufflen;
7193 i = 0;
7194 if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM))
7195 while (count > 65536) {
7196 psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
7197 psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
7198 psg->sg[i].count = cpu_to_le32(65536);
7199 if (++i >= host->sg_tablesize) {
7200#if (defined(AAC_DEBUG_INSTRUMENT_SG))
7201 printk(KERN_INFO
7202 "SG List[%d] too large based on original single element %d in size\n",
7203 i, scsicmd->request_bufflen);
7204#endif
7205 BUG();
7206 }
7207 addr += 65536;
7208 count -= 65536;
7209 }
7210 psg->count = cpu_to_le32(1+i);
7211 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
7212 psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
7213 psg->sg[i].count = cpu_to_le32(count);
7214#else
7215 psg->count = cpu_to_le32(1);
7216 psg->sg[0].addr[0] = cpu_to_le32(addr & 0xffffffff);
7217 psg->sg[0].addr[1] = cpu_to_le32(addr >> 32);
7218 psg->sg[0].count = cpu_to_le32(scsicmd->request_bufflen);
7219#endif
7220 byte_count = scsicmd->request_bufflen;
7221 }
7222#endif
7223#if (defined(AAC_DEBUG_INSTRUMENT_SG))
7224{
7225 int i, nseg = le32_to_cpu(psg->count);
7226 printk("aac_build_sg64:");
7227 for (i = 0; i < nseg; i++) {
7228 int count = le32_to_cpu(psg->sg[i].count);
7229 u32 addr0 = le32_to_cpu(psg->sg[i].addr[0]);
7230 u32 addr1 = le32_to_cpu(psg->sg[i].addr[1]);
7231 if (addr1 == 0)
7232 printk(" %x[%d]", addr0, count);
7233 else
7234 printk(" %x%08x[%d]", addr1, addr0, count);
7235 }
7236 printk ("\n");
7237}
7238#endif
7239
7240 adbg_debug_aac_config(scsicmd, psg->count, byte_count);
7241
7242 return le32_to_cpu(psg->count);
7243 return 0;
7244}
7245#endif
7246
7247#ifdef AAC_DETAILED_STATUS_INFO
7248
/*
 * One entry of the SRB status lookup table: an adapter SRB status code
 * paired with its human-readable label (AAC_DETAILED_STATUS_INFO only).
 */
struct aac_srb_status_info {
	u32	status;
	char	*str;
};
7253
7254
/* SRB status -> description table, searched linearly by
 * aac_get_status_string(); the 0xff entry is a catch-all label. */
static struct aac_srb_status_info srb_status_info[] = {
	{ SRB_STATUS_PENDING,		"Pending Status"},
	{ SRB_STATUS_SUCCESS,		"Success"},
	{ SRB_STATUS_ABORTED,		"Aborted Command"},
	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
	{ SRB_STATUS_ERROR,		"Error Event"},
	{ SRB_STATUS_BUSY,		"Device Busy"},
	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
	{ SRB_STATUS_NO_DEVICE,		"No Device"},
	{ SRB_STATUS_TIMEOUT,		"Timeout"},
	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
	{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
	{ SRB_STATUS_NO_HBA,		"No HBA"},
	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
	{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"},
	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
	{ 0xff,				"Unknown Error"}
};
7289
7290char *aac_get_status_string(u32 status)
7291{
7292 int i;
7293
7294 for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
7295 if (srb_status_info[i].status == status)
7296 return srb_status_info[i].str;
7297
7298 return "Bad Status Code";
7299}
7300
7301#endif
7302
7303void aac_simulate_scsi_error(struct aac_dev *dev, struct hw_fib *hw_fib)
7304{
7305 struct aac_hba_resp *err =
7306 &((struct aac_native_hba *)hw_fib)->resp.err;
7307
7308 err->iu_type = HBA_IU_TYPE_RESP;
7309 err->service_response = HBA_RESP_SVCRES_TASK_COMPLETE;
7310 err->residual_count = 0;
7311
7312 if (dev->simulated_scsi_error & 0x01) {
7313 err->status = SAM_STAT_CHECK_CONDITION;
7314 err->datapres = 0x02; /* Indicate Sense Data */
7315 err->sense_response_data_len = 0x08;
7316 err->sense_response_buf[0] = 0x72; /* Descriptor Sense Data */
7317 err->sense_response_buf[1] = 0x05; /* Illegal Request */
7318 err->sense_response_buf[2] = 0x24; /* ASC: Invalid field in the CDB */
7319 err->sense_response_buf[3] = 0x00; /* ASCQ */
7320 err->sense_response_buf[4] = 0x00; /* Reserved */
7321 err->sense_response_buf[5] = 0x00; /* Reserved */
7322 err->sense_response_buf[6] = 0x00; /* Reserved */
7323 err->sense_response_buf[7] = 0x00; /* Additional Sense Length */
7324 } else if (dev->simulated_scsi_error & 0x02) {
7325 err->status = SAM_STAT_BUSY;
7326 err->datapres = 0x00; /* No Data */
7327 err->sense_response_data_len = 0x00;
7328 } else if (dev->simulated_scsi_error & 0x04) {
7329 err->status = SAM_STAT_RESERVATION_CONFLICT;
7330 err->datapres = 0x00; /* No Data */
7331 err->sense_response_data_len = 0x00;
7332 } else if (dev->simulated_scsi_error & 0x08) {
7333 err->status = SAM_STAT_TASK_SET_FULL;
7334 err->datapres = 0x00; /* No Data */
7335 err->sense_response_data_len = 0x00;
7336 } else if (dev->simulated_scsi_error & 0x10) {
7337 err->status = SAM_STAT_TASK_ABORTED;
7338 err->datapres = 0x00; /* No Data */
7339 err->sense_response_data_len = 0x00;
7340 }
7341}
7342
7343void aac_simulate_tgt_failure(struct aac_dev *dev, struct hw_fib *hw_fib)
7344{
7345 struct aac_hba_resp *err =
7346 &((struct aac_native_hba *)hw_fib)->resp.err;
7347
7348 err->iu_type = HBA_IU_TYPE_RESP;
7349 err->service_response = HBA_RESP_SVCRES_FAILURE;
7350 err->datapres = 0;
7351
7352 if (dev->simulated_tgt_failure & 0x01) {
7353 err->status = HBA_RESP_STAT_HBAMODE_DISABLED;
7354 } else if (dev->simulated_tgt_failure & 0x02) {
7355 err->status = HBA_RESP_STAT_IO_ERROR;
7356 err->sense_response_data_len = 0;
7357 } else if (dev->simulated_tgt_failure & 0x04) {
7358 err->status = HBA_RESP_STAT_IO_ABORTED;
7359 err->sense_response_data_len = 0;
7360 } else if (dev->simulated_tgt_failure & 0x08) {
7361 err->status = HBA_RESP_STAT_NO_PATH_TO_DEVICE;
7362 err->sense_response_data_len = 0;
7363 } else if (dev->simulated_tgt_failure & 0x10) {
7364 err->status = HBA_RESP_STAT_INVALID_DEVICE;
7365 err->sense_response_data_len = 0;
7366 } else if (dev->simulated_tgt_failure & 0x20) {
7367 err->status = HBA_RESP_STAT_UNDERRUN;
7368 err->residual_count = 1;
7369 err->sense_response_data_len -= 1;
7370 } else if (dev->simulated_tgt_failure & 0x40) {
7371 err->status = HBA_RESP_STAT_OVERRUN;
7372 err->residual_count = 1;
7373 err->sense_response_data_len -= 1;
7374 }
7375}