// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>
#include <linux/pgtable.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
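
/*
 * Illustrative sketch, not part of the driver: a management app could
 * locate the signature above by scanning the driver image for the six
 * byte 'dPtSiG' marker.  The helper name and scanning approach here are
 * hypothetical; this is not the actual dptsig tool.
 */
#if 0
static const u8 sig_marker[6] = { 'd', 'P', 't', 'S', 'i', 'G' };

static dpt_sig_S *find_dpt_sig(u8 *image, size_t len)
{
	size_t i;

	for (i = 0; i + sizeof(dpt_sig_S) <= len; i++)
		if (memcmp(image + i, sig_marker, sizeof(sig_marker)) == 0)
			return (dpt_sig_S *)(image + i);
	return NULL;	/* no signature present */
}
#endif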




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
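
/*
 * A condensed sketch of how the synchronous post/wait machinery ties
 * together (simplified; the locking and queue unlinking are omitted,
 * see adpt_i2o_post_wait() and adpt_i2o_post_wait_complete() below for
 * the real thing):
 */
#if 0
	/* caller side (adpt_i2o_post_wait): link a node, tag the message */
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id = (adpt_post_wait_id + 1) & 0x7fff;
	wait_data->id = adpt_post_wait_id;
	msg[2] |= 0x80000000 | wait_data->id;
	/* ...post the message, then sleep on wait_data->wq... */

	/* reply ISR side: match the id, record status, wake the sleeper */
	adpt_i2o_post_wait_complete(reply_context, status);
#endif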


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
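
/*
 * Example of the split performed by the helpers above (illustrative
 * value): for dma_addr_t addr = 0x0000000123456789ULL,
 * dma_low(addr) == 0x23456789 and dma_high(addr) == 0x00000001.  When a
 * 64-bit SGE is built (see adpt_inquiry() below) the low word is
 * written into the message first, then the high word.
 */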

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

        /* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8  scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3]  = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
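
/*
 * For reference, the 32-bit single-SGE INQUIRY frame built above
 * occupies 14 words; the 64-bit variant inserts a two-word SGL
 * attribute element and a second address word, giving 17:
 *
 *	msg[0]      frame size | SGL_OFFSET_12
 *	msg[1]      function, HOST_TID, ADAPTER_TID
 *	msg[4..5]   DPT private I2O_CMD_SCSI_EXEC, interpret bit
 *	msg[6]      SCSI flags | CDB length (6)
 *	msg[7..10]  16-byte CDB block (INQUIRY)
 *	msg[11]     allocation length (36)
 *	msg[12]     0xD0000000 | direction | length
 *	msg[13]     32-bit buffer bus address
 */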


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	// sector_div() leaves the quotient in capacity and returns the remainder
	sector_div(capacity, heads * sectors);
	cylinders = (int)capacity;

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
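
/*
 * Worked example of the mapping above (illustrative arithmetic): an
 * 8 GiB disk has a capacity of 16777216 sectors (0x1000000), which
 * lands in the >= 0x80000 band, so heads = 255 and sectors = 63;
 * sector_div() then leaves 16777216 / (255 * 63) = 1044 cylinders.
 */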


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
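
/*
 * Round trip of the two helpers above on a 64-bit kernel (sketch): the
 * reply pointer is parked in a per-HBA slot table and only the slot
 * index travels through the 32-bit I2O context field.
 */
#if 0
	u32 ctx = adpt_ioctl_to_context(pHba, reply);	/* park the pointer */

	if (ctx != (u32)-1) {
		/* ...ctx travels through the I2O frame and comes back... */
		void *p = adpt_ioctl_from_context(pHba, ctx);
		/* p == reply, and the slot is available again */
	}
#endif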

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = scsi_cmd_to_rq(cmd)->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode =  adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

        if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
	   	iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
       // TODO we need a MORE unique way of getting ids
       // to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id =  adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
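
/*
 * Typical caller pattern for adpt_i2o_post_wait(), mirroring
 * adpt_device_reset() above (sketch only; msg[2] is overwritten inside
 * the function with 0x80000000 | the wait id, so callers leave it 0):
 */
#if 0
	struct adpt_device *d = cmd->device->hostdata;
	u32 msg[4];
	int rcode;

	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_DEVICE_RESET << 24 | HOST_TID << 12 | d->tid;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (rcode == -EOPNOTSUPP)
		;	/* IOP reported I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION */
#endif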


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
        // If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because the controller may wake up and
			   corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because the controller
				   may wake up and corrupt those bytes at any
				   time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			    	continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong, skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev =  kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
	/* Copy the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg 	 = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
1851 
1852 #if defined __ia64__
1853 static void adpt_ia64_info(sysInfo_S* si)
1854 {
1855 	// This is all the info we need for now
1856 	// We will add more info as our new
1857 	// management utility requires it
1858 	si->processorType = PROC_IA64;
1859 }
1860 #endif
1861 
1862 #if defined __sparc__
1863 static void adpt_sparc_info(sysInfo_S* si)
1864 {
1865 	// This is all the info we need for now
1866 	// We will add more info as our new
1867 	// management utility requires it
1868 	si->processorType = PROC_ULTRASPARC;
1869 }
1870 #endif
1871 #if defined __alpha__
1872 static void adpt_alpha_info(sysInfo_S* si)
1873 {
1874 	// This is all the info we need for now
1875 	// We will add more info as our new
1876 	// management utility requires it
1877 	si->processorType = PROC_ALPHA;
1878 }
1879 #endif
1880 
1881 #if defined __i386__
1882 
1883 #include <uapi/asm/vm86.h>
1884 
1885 static void adpt_i386_info(sysInfo_S* si)
1886 {
1887 	// This is all the info we need for now
1888 	// We will add more info as our new
1889 	// management utility requires it
1890 	switch (boot_cpu_data.x86) {
1891 	case CPU_386:
1892 		si->processorType = PROC_386;
1893 		break;
1894 	case CPU_486:
1895 		si->processorType = PROC_486;
1896 		break;
1897 	case CPU_586:
1898 		si->processorType = PROC_PENTIUM;
1899 		break;
1900 	default:  // Just in case
1901 		si->processorType = PROC_PENTIUM;
1902 		break;
1903 	}
1904 }
1905 #endif
1906 
1907 /*
1908  * This routine returns information about the system.  This does not affect
1909  * any logic, and if the info is wrong it doesn't matter.
1910  */
1911 
1912 /* Get all the info we cannot get from kernel services */
1913 static int adpt_system_info(void __user *buffer)
1914 {
1915 	sysInfo_S si;
1916 
1917 	memset(&si, 0, sizeof(si));
1918 
1919 	si.osType = OS_LINUX;
1920 	si.osMajorVersion = 0;
1921 	si.osMinorVersion = 0;
1922 	si.osRevision = 0;
1923 	si.busType = SI_PCI_BUS;
1924 	si.processorFamily = DPTI_sig.dsProcessorFamily;
1925 
1926 #if defined __i386__
1927 	adpt_i386_info(&si);
1928 #elif defined (__ia64__)
1929 	adpt_ia64_info(&si);
1930 #elif defined(__sparc__)
1931 	adpt_sparc_info(&si);
1932 #elif defined (__alpha__)
1933 	adpt_alpha_info(&si);
1934 #else
1935 	si.processorType = 0xff ;
1936 #endif
1937 	if (copy_to_user(buffer, &si, sizeof(si))){
1938 		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
1939 		return -EFAULT;
1940 	}
1941 
1942 	return 0;
1943 }
1944 
1945 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1946 {
1947 	int minor;
1948 	int error = 0;
1949 	adpt_hba* pHba;
1950 	ulong flags = 0;
1951 	void __user *argp = (void __user *)arg;
1952 
1953 	minor = iminor(inode);
1954 	if (minor >= DPTI_MAX_HBA){
1955 		return -ENXIO;
1956 	}
1957 	mutex_lock(&adpt_configuration_lock);
1958 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
1959 		if (pHba->unit == minor) {
1960 			break;	/* found adapter */
1961 		}
1962 	}
1963 	mutex_unlock(&adpt_configuration_lock);
1964 	if(pHba == NULL){
1965 		return -ENXIO;
1966 	}
1967 
1968 	while((volatile u32) pHba->state & DPTI_STATE_RESET )
1969 		schedule_timeout_uninterruptible(2);
1970 
1971 	switch (cmd) {
1972 	// TODO: handle 3 cases
1973 	case DPT_SIGNATURE:
1974 		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
1975 			return -EFAULT;
1976 		}
1977 		break;
1978 	case I2OUSRCMD:
1979 		return adpt_i2o_passthru(pHba, argp);
1980 
1981 	case DPT_CTRLINFO:{
1982 		drvrHBAinfo_S HbaInfo;
1983 
1984 #define FLG_OSD_PCI_VALID 0x0001
1985 #define FLG_OSD_DMA	  0x0002
1986 #define FLG_OSD_I2O	  0x0004
1987 		memset(&HbaInfo, 0, sizeof(HbaInfo));
1988 		HbaInfo.drvrHBAnum = pHba->unit;
1989 		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
1990 		HbaInfo.blinkState = adpt_read_blink_led(pHba);
1991 		HbaInfo.pciBusNum =  pHba->pDev->bus->number;
1992 		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
1993 		HbaInfo.Interrupt = pHba->pDev->irq;
1994 		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
1995 		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
1996 			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
1997 			return -EFAULT;
1998 		}
1999 		break;
2000 		}
2001 	case DPT_SYSINFO:
2002 		return adpt_system_info(argp);
2003 	case DPT_BLINKLED:{
2004 		u32 value;
2005 		value = (u32)adpt_read_blink_led(pHba);
2006 		if (copy_to_user(argp, &value, sizeof(value))) {
2007 			return -EFAULT;
2008 		}
2009 		break;
2010 		}
2011 	case I2ORESETCMD: {
2012 		struct Scsi_Host *shost = pHba->host;
2013 
2014 		if (shost)
2015 			spin_lock_irqsave(shost->host_lock, flags);
2016 		adpt_hba_reset(pHba);
2017 		if (shost)
2018 			spin_unlock_irqrestore(shost->host_lock, flags);
2019 		break;
2020 	}
2021 	case I2ORESCANCMD:
2022 		adpt_rescan(pHba);
2023 		break;
2024 	default:
2025 		return -EINVAL;
2026 	}
2027 
2028 	return error;
2029 }
2030 
2031 static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2032 {
2033 	struct inode *inode;
2034 	long ret;
2035 
2036 	inode = file_inode(file);
2037 
2038 	mutex_lock(&adpt_mutex);
2039 	ret = adpt_ioctl(inode, file, cmd, arg);
2040 	mutex_unlock(&adpt_mutex);
2041 
2042 	return ret;
2043 }
2044 
2045 #ifdef CONFIG_COMPAT
2046 static long compat_adpt_ioctl(struct file *file,
2047 				unsigned int cmd, unsigned long arg)
2048 {
2049 	struct inode *inode;
2050 	long ret;
2051 
2052 	inode = file_inode(file);
2053 
2054 	mutex_lock(&adpt_mutex);
2055 
2056 	switch(cmd) {
2057 		case DPT_SIGNATURE:
2058 		case I2OUSRCMD:
2059 		case DPT_CTRLINFO:
2060 		case DPT_SYSINFO:
2061 		case DPT_BLINKLED:
2062 		case I2ORESETCMD:
2063 		case I2ORESCANCMD:
2064 		case (DPT_TARGET_BUSY & 0xFFFF):
2065 		case DPT_TARGET_BUSY:
2066 			ret = adpt_ioctl(inode, file, cmd, arg);
2067 			break;
2068 		default:
2069 			ret =  -ENOIOCTLCMD;
2070 	}
2071 
2072 	mutex_unlock(&adpt_mutex);
2073 
2074 	return ret;
2075 }
2076 #endif
2077 
2078 static irqreturn_t adpt_isr(int irq, void *dev_id)
2079 {
2080 	struct scsi_cmnd* cmd;
2081 	adpt_hba* pHba = dev_id;
2082 	u32 m;
2083 	void __iomem *reply;
2084 	u32 status=0;
2085 	u32 context;
2086 	ulong flags = 0;
2087 	int handled = 0;
2088 
2089 	if (pHba == NULL){
2090 		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2091 		return IRQ_NONE;
2092 	}
2093 	if(pHba->host)
2094 		spin_lock_irqsave(pHba->host->host_lock, flags);
2095 
2096 	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2097 		m = readl(pHba->reply_port);
2098 		if(m == EMPTY_QUEUE){
2099 			// Try twice then give up
2100 			rmb();
2101 			m = readl(pHba->reply_port);
2102 			if(m == EMPTY_QUEUE){
2103 				// This really should not happen
2104 				printk(KERN_ERR"dpti: Could not get reply frame\n");
2105 				goto out;
2106 			}
2107 		}
2108 		if (pHba->reply_pool_pa <= m &&
2109 		    m < pHba->reply_pool_pa +
2110 			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2111 			reply = (u8 *)pHba->reply_pool +
2112 						(m - pHba->reply_pool_pa);
2113 		} else {
2114 			/* Ick, we should *never* be here */
2115 			printk(KERN_ERR "dpti: reply frame not from pool\n");
2116 			reply = (u8 *)bus_to_virt(m);
2117 		}
2118 
2119 		if (readl(reply) & MSG_FAIL) {
2120 			u32 old_m = readl(reply+28);
2121 			void __iomem *msg;
2122 			u32 old_context;
2123 			PDEBUG("%s: Failed message\n",pHba->name);
2124 			if(old_m >= 0x100000){
2125 				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2126 				writel(m,pHba->reply_port);
2127 				continue;
2128 			}
2129 			// Transaction context is 0 in failed reply frame
2130 			msg = pHba->msg_addr_virt + old_m;
2131 			old_context = readl(msg+12);
2132 			writel(old_context, reply+12);
2133 			adpt_send_nop(pHba, old_m);
2134 		}
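		/*
		 * The initiator context word (reply+8) is treated as a bit
		 * field below: 0x40000000 marks an ioctl pass-through reply,
		 * 0x80000000 marks a post-wait reply, and anything else is a
		 * normal SCSI completion looked up by tag.
		 */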
2135 		context = readl(reply+8);
2136 		if(context & 0x40000000){ // IOCTL
2137 			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2138 			if( p != NULL) {
2139 				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2140 			}
2141 			// All IOCTLs will also be post wait
2142 		}
2143 		if(context & 0x80000000){ // Post wait message
2144 			status = readl(reply+16);
2145 			if(status  >> 24){
2146 				status &=  0xffff; /* Get detail status */
2147 			} else {
2148 				status = I2O_POST_WAIT_OK;
2149 			}
2150 			if(!(context & 0x40000000)) {
2151 				/*
2152 				 * The request tag is one less than the command tag
2153 				 * as the firmware might treat a 0 tag as invalid
2154 				 */
2155 				cmd = scsi_host_find_tag(pHba->host,
2156 							 readl(reply + 12) - 1);
2157 				if(cmd != NULL) {
2158 					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2159 				}
2160 			}
2161 			adpt_i2o_post_wait_complete(context, status);
2162 		} else { // SCSI message
2163 			/*
2164 			 * The request tag is one less than the command tag
2165 			 * as the firmware might treat a 0 tag as invalid
2166 			 */
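			/*
			 * This is the inverse of the mapping set up in
			 * adpt_scsi_to_i2o(), which posts msg[3] = tag + 1.
			 */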
2167 			cmd = scsi_host_find_tag(pHba->host,
2168 						 readl(reply + 12) - 1);
2169 			if(cmd != NULL){
2170 				scsi_dma_unmap(cmd);
2171 				adpt_i2o_scsi_complete(reply, cmd);
2172 			}
2173 		}
2174 		writel(m, pHba->reply_port);
2175 		wmb();
2176 		rmb();
2177 	}
2178 	handled = 1;
2179 out:	if(pHba->host)
2180 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2181 	return IRQ_RETVAL(handled);
2182 }
2183 
2184 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2185 {
2186 	int i;
2187 	u32 msg[MAX_MESSAGE_SIZE];
2188 	u32* mptr;
2189 	u32* lptr;
2190 	u32 *lenptr;
2191 	int direction;
2192 	int scsidir;
2193 	int nseg;
2194 	u32 len;
2195 	u32 reqlen;
2196 	s32 rcode;
2197 	dma_addr_t addr;
2198 
2199 	memset(msg, 0 , sizeof(msg));
2200 	len = scsi_bufflen(cmd);
2201 	direction = 0x00000000;
2202 
2203 	scsidir = 0x00000000;			// DATA NO XFER
2204 	if(len) {
2205 		/*
2206 		 * Set SCBFlags to indicate if data is being transferred
2207 		 * in or out, or no data transfer
2208 		 * Note: no need to check for a negative index since
2209 		 * cmd->cmnd[0] is an unsigned char
2210 		 */
2211 		switch(cmd->sc_data_direction){
2212 		case DMA_FROM_DEVICE:
2213 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2214 			break;
2215 		case DMA_TO_DEVICE:
2216 			direction=0x04000000;	// SGL OUT
2217 			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
2218 			break;
2219 		case DMA_NONE:
2220 			break;
2221 		case DMA_BIDIRECTIONAL:
2222 			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
2223 			// Assume In - and continue;
2224 			break;
2225 		default:
2226 			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2227 			     pHba->name, cmd->cmnd[0]);
2228 			cmd->result = (DID_ERROR <<16);
2229 			scsi_done(cmd);
2230 			return 0;
2231 		}
2232 	}
2233 	// msg[0] is set later
2234 	// I2O_CMD_SCSI_EXEC
2235 	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2236 	msg[2] = 0;
2237 	/* Add 1 to avoid firmware treating it as invalid command */
2238 	msg[3] = scsi_cmd_to_rq(cmd)->tag + 1;
2239 	// Our cards use the transaction context as the tag for queueing
2240 	// Adaptec/DPT Private stuff
2241 	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2242 	msg[5] = d->tid;
2243 	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
2244 	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
2245 	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2246 	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2247 	msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2248 
2249 	mptr=msg+7;
2250 
2251 	// Write SCSI command into the message - always 16 byte block
2252 	memset(mptr, 0,  16);
2253 	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2254 	mptr+=4;
2255 	lenptr=mptr++;		/* Remember me - fill in when we know */
2256 	if (dpt_dma64(pHba)) {
2257 		reqlen = 16;		// SINGLE SGE
2258 		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2259 		*mptr++ = 1 << PAGE_SHIFT;
2260 	} else {
2261 		reqlen = 14;		// SINGLE SGE
2262 	}
2263 	/* Now fill in the SGList and command */
2264 
2265 	nseg = scsi_dma_map(cmd);
2266 	BUG_ON(nseg < 0);
2267 	if (nseg) {
2268 		struct scatterlist *sg;
2269 
2270 		len = 0;
2271 		scsi_for_each_sg(cmd, sg, nseg, i) {
2272 			lptr = mptr;
2273 			*mptr++ = direction|0x10000000|sg_dma_len(sg);
2274 			len+=sg_dma_len(sg);
2275 			addr = sg_dma_address(sg);
2276 			*mptr++ = dma_low(addr);
2277 			if (dpt_dma64(pHba))
2278 				*mptr++ = dma_high(addr);
2279 			/* Make this an end of list */
2280 			if (i == nseg - 1)
2281 				*lptr = direction|0xD0000000|sg_dma_len(sg);
2282 		}
2283 		reqlen = mptr - msg;
2284 		*lenptr = len;
2285 
2286 		if(cmd->underflow && len != cmd->underflow){
2287 			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2288 				len, cmd->underflow);
2289 		}
2290 	} else {
2291 		*lenptr = len = 0;
2292 		reqlen = 12;
2293 	}
2294 
2295 	/* Stick the headers on */
2296 	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
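	/*
	 * Worked example (32-bit DMA, one data segment): the fixed header,
	 * CDB and length word occupy message words 0-11 and the single SG
	 * element adds two more, so reqlen = 14 and
	 * msg[0] = (14 << 16) | SGL_OFFSET_12.
	 */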
2297 
2298 	// Send it on its way
2299 	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2300 	if (rcode == 0) {
2301 		return 0;
2302 	}
2303 	return rcode;
2304 }
2305 
2306 
2307 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2308 {
2309 	struct Scsi_Host *host;
2310 
2311 	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2312 	if (host == NULL) {
2313 		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2314 		return -1;
2315 	}
2316 	host->hostdata[0] = (unsigned long)pHba;
2317 	pHba->host = host;
2318 
2319 	host->irq = pHba->pDev->irq;
2320 	/* no IO ports, so don't have to set host->io_port and
2321 	 * host->n_io_port
2322 	 */
2323 	host->io_port = 0;
2324 	host->n_io_port = 0;
2325 				/* see comments in scsi_host.h */
2326 	host->max_id = 16;
2327 	host->max_lun = 256;
2328 	host->max_channel = pHba->top_scsi_channel + 1;
2329 	host->cmd_per_lun = 1;
2330 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2331 	host->sg_tablesize = pHba->sg_tablesize;
2332 	host->can_queue = pHba->post_fifo_size;
2333 
2334 	return 0;
2335 }
2336 
2337 
2338 static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2339 {
2340 	adpt_hba* pHba;
2341 	u32 hba_status;
2342 	u32 dev_status;
2343 	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2344 	// Reading bytes here would look cleaner, but the rest of the
2345 	// I/O in this driver is done in 4-byte words, so keep that
2346 	// model for consistency
2347 	u16 detailed_status = readl(reply+16) &0xffff;
2348 	dev_status = (detailed_status & 0xff);
2349 	hba_status = detailed_status >> 8;
2350 
2351 	// calculate resid for sg
2352 	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2353 
2354 	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2355 
2356 	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false
2357 
2358 	if(!(reply_flags & MSG_FAIL)) {
2359 		switch(detailed_status & I2O_SCSI_DSC_MASK) {
2360 		case I2O_SCSI_DSC_SUCCESS:
2361 			cmd->result = (DID_OK << 16);
2362 			// handle underflow
2363 			if (readl(reply+20) < cmd->underflow) {
2364 				cmd->result = (DID_ERROR <<16);
2365 				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2366 			}
2367 			break;
2368 		case I2O_SCSI_DSC_REQUEST_ABORTED:
2369 			cmd->result = (DID_ABORT << 16);
2370 			break;
2371 		case I2O_SCSI_DSC_PATH_INVALID:
2372 		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2373 		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2374 		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2375 		case I2O_SCSI_DSC_NO_ADAPTER:
2376 		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2377 			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2378 				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2379 			cmd->result = (DID_TIME_OUT << 16);
2380 			break;
2381 		case I2O_SCSI_DSC_ADAPTER_BUSY:
2382 		case I2O_SCSI_DSC_BUS_BUSY:
2383 			cmd->result = (DID_BUS_BUSY << 16);
2384 			break;
2385 		case I2O_SCSI_DSC_SCSI_BUS_RESET:
2386 		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2387 			cmd->result = (DID_RESET << 16);
2388 			break;
2389 		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2390 			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2391 			cmd->result = (DID_PARITY << 16);
2392 			break;
2393 		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2394 		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2395 		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2396 		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2397 		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2398 		case I2O_SCSI_DSC_DATA_OVERRUN:
2399 		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2400 		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2401 		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2402 		case I2O_SCSI_DSC_PROVIDE_FAILURE:
2403 		case I2O_SCSI_DSC_REQUEST_TERMINATED:
2404 		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2405 		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2406 		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2407 		case I2O_SCSI_DSC_INVALID_CDB:
2408 		case I2O_SCSI_DSC_LUN_INVALID:
2409 		case I2O_SCSI_DSC_SCSI_TID_INVALID:
2410 		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2411 		case I2O_SCSI_DSC_NO_NEXUS:
2412 		case I2O_SCSI_DSC_CDB_RECEIVED:
2413 		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2414 		case I2O_SCSI_DSC_QUEUE_FROZEN:
2415 		case I2O_SCSI_DSC_REQUEST_INVALID:
2416 		default:
2417 			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2418 				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2419 			       hba_status, dev_status, cmd->cmnd[0]);
2420 			cmd->result = (DID_ERROR << 16);
2421 			break;
2422 		}
2423 
2424 		// copy over the request sense data if it was a check
2425 		// condition status
2426 		if (dev_status == SAM_STAT_CHECK_CONDITION) {
2427 			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2428 			// Copy over the sense data
2429 			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2430 			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2431 			   cmd->sense_buffer[2] == DATA_PROTECT ){
2432 				/* This handles a failed array */
2433 				cmd->result = (DID_TIME_OUT << 16);
2434 				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2435 					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2436 					hba_status, dev_status, cmd->cmnd[0]);
2437 
2438 			}
2439 		}
2440 	} else {
2441 		/* In this condition we could not talk to the tid;
2442 		 * the card rejected it.  We should signal a retry
2443 		 * for a limited number of retries.
2444 		 */
2445 		cmd->result = (DID_TIME_OUT << 16);
2446 		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2447 			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2448 			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2449 	}
2450 
2451 	cmd->result |= (dev_status);
2452 
2453 	scsi_done(cmd);
2454 }
2455 
2456 
2457 static s32 adpt_rescan(adpt_hba* pHba)
2458 {
2459 	s32 rcode;
2460 	ulong flags = 0;
2461 
2462 	if(pHba->host)
2463 		spin_lock_irqsave(pHba->host->host_lock, flags);
2464 	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2465 		goto out;
2466 	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2467 		goto out;
2468 	rcode = 0;
2469 out:	if(pHba->host)
2470 		spin_unlock_irqrestore(pHba->host->host_lock, flags);
2471 	return rcode;
2472 }
2473 
2474 
2475 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2476 {
2477 	int i;
2478 	int max;
2479 	int tid;
2480 	struct i2o_device *d;
2481 	i2o_lct *lct = pHba->lct;
2482 	u8 bus_no = 0;
2483 	s16 scsi_id;
2484 	u64 scsi_lun;
2485 	u32 buf[10]; // at least 8 u32's
2486 	struct adpt_device* pDev = NULL;
2487 	struct i2o_device* pI2o_dev = NULL;
2488 
2489 	if (lct == NULL) {
2490 		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2491 		return -1;
2492 	}
2493 
2494 	max = lct->table_size;
2495 	max -= 3;
2496 	max /= 9;
2497 
2498 	// Mark each drive as unscanned
2499 	for (d = pHba->devices; d; d = d->next) {
2500 		pDev =(struct adpt_device*) d->owner;
2501 		if(!pDev){
2502 			continue;
2503 		}
2504 		pDev->state |= DPTI_DEV_UNSCANNED;
2505 	}
2506 
2507 	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2508 
2509 	for(i=0;i<max;i++) {
2510 		if( lct->lct_entry[i].user_tid != 0xfff){
2511 			continue;
2512 		}
2513 
2514 		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2515 		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2516 		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2517 			tid = lct->lct_entry[i].tid;
2518 			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2519 				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2520 				continue;
2521 			}
2522 			bus_no = buf[0]>>16;
2523 			if (bus_no >= MAX_CHANNEL) {	/* Something wrong; skip it */
2524 				printk(KERN_WARNING
2525 					"%s: Channel number %d out of range\n",
2526 					pHba->name, bus_no);
2527 				continue;
2528 			}
2529 
2530 			scsi_id = buf[1];
2531 			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2532 			pDev = pHba->channel[bus_no].device[scsi_id];
2533 			/* walk the LUN chain for this target */
2534 			while(pDev) {
2535 				if(pDev->scsi_lun == scsi_lun) {
2536 					break;
2537 				}
2538 				pDev = pDev->next_lun;
2539 			}
2540 			if(!pDev) { // Something new; add it
2541 				d = kmalloc(sizeof(struct i2o_device),
2542 					    GFP_ATOMIC);
2543 				if(d==NULL)
2544 				{
2545 					printk(KERN_CRIT "Out of memory for I2O device data.\n");
2546 					return -ENOMEM;
2547 				}
2548 
2549 				d->controller = pHba;
2550 				d->next = NULL;
2551 
2552 				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2553 
2554 				d->flags = 0;
2555 				adpt_i2o_report_hba_unit(pHba, d);
2556 				adpt_i2o_install_device(pHba, d);
2557 
2558 				pDev = pHba->channel[bus_no].device[scsi_id];
2559 				if( pDev == NULL){
2560 					pDev =
2561 					  kzalloc(sizeof(struct adpt_device),
2562 						  GFP_ATOMIC);
2563 					if(pDev == NULL) {
2564 						return -ENOMEM;
2565 					}
2566 					pHba->channel[bus_no].device[scsi_id] = pDev;
2567 				} else {
2568 					while (pDev->next_lun) {
2569 						pDev = pDev->next_lun;
2570 					}
2571 					pDev = pDev->next_lun =
2572 					  kzalloc(sizeof(struct adpt_device),
2573 						  GFP_ATOMIC);
2574 					if(pDev == NULL) {
2575 						return -ENOMEM;
2576 					}
2577 				}
2578 				pDev->tid = d->lct_data.tid;
2579 				pDev->scsi_channel = bus_no;
2580 				pDev->scsi_id = scsi_id;
2581 				pDev->scsi_lun = scsi_lun;
2582 				pDev->pI2o_dev = d;
2583 				d->owner = pDev;
2584 				pDev->type = (buf[0])&0xff;
2585 				pDev->flags = (buf[0]>>8)&0xff;
2586 				// Too late, SCSI system has made up its mind, but what the hey ...
2587 				if(scsi_id > pHba->top_scsi_id){
2588 					pHba->top_scsi_id = scsi_id;
2589 				}
2590 				if(scsi_lun > pHba->top_scsi_lun){
2591 					pHba->top_scsi_lun = scsi_lun;
2592 				}
2593 				continue;
2594 			} // end of new i2o device
2595 
2596 			// We found an old device - check it
2597 			while(pDev) {
2598 				if(pDev->scsi_lun == scsi_lun) {
2599 					if(!scsi_device_online(pDev->pScsi_dev)) {
2600 						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2601 								pHba->name,bus_no,scsi_id,scsi_lun);
2602 						if (pDev->pScsi_dev) {
2603 							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2604 						}
2605 					}
2606 					d = pDev->pI2o_dev;
2607 					if(d->lct_data.tid != tid) { // something changed
2608 						pDev->tid = tid;
2609 						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2610 						if (pDev->pScsi_dev) {
2611 							pDev->pScsi_dev->changed = TRUE;
2612 							pDev->pScsi_dev->removable = TRUE;
2613 						}
2614 					}
2615 					// Found it - mark it scanned
2616 					pDev->state = DPTI_DEV_ONLINE;
2617 					break;
2618 				}
2619 				pDev = pDev->next_lun;
2620 			}
2621 		}
2622 	}
2623 	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2624 		pDev =(struct adpt_device*) pI2o_dev->owner;
2625 		if(!pDev){
2626 			continue;
2627 		}
2628 		// Mark offline any drives that previously existed but could
2629 		// not be found in the LCT
2630 		if (pDev->state & DPTI_DEV_UNSCANNED){
2631 			pDev->state = DPTI_DEV_OFFLINE;
2632 			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2633 			if (pDev->pScsi_dev) {
2634 				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2635 			}
2636 		}
2637 	}
2638 	return 0;
2639 }
2640 
2641 /*============================================================================
2642  *  Routines from i2o subsystem
2643  *============================================================================
2644  */
2645 
2646 
2647 
2648 /*
2649  *	Bring an I2O controller into HOLD state. See the spec.
2650  */
2651 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2652 {
2653 	int rcode;
2654 
2655 	if(pHba->initialized ) {
2656 		if (adpt_i2o_status_get(pHba) < 0) {
2657 			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2658 				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2659 				return rcode;
2660 			}
2661 			if (adpt_i2o_status_get(pHba) < 0) {
2662 				printk(KERN_INFO "HBA not responding.\n");
2663 				return -1;
2664 			}
2665 		}
2666 
2667 		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2668 			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2669 			return -1;
2670 		}
2671 
2672 		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2673 		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2674 		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2675 		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2676 			adpt_i2o_reset_hba(pHba);
2677 			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2678 				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2679 				return -1;
2680 			}
2681 		}
2682 	} else {
2683 		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2684 			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2685 			return rcode;
2686 		}
2687 
2688 	}
2689 
2690 	if (adpt_i2o_init_outbound_q(pHba) < 0) {
2691 		return -1;
2692 	}
2693 
2694 	/* In HOLD state */
2695 
2696 	if (adpt_i2o_hrt_get(pHba) < 0) {
2697 		return -1;
2698 	}
2699 
2700 	return 0;
2701 }
2702 
2703 /*
2704  *	Bring a controller online into OPERATIONAL state.
2705  */
2706 
2707 static int adpt_i2o_online_hba(adpt_hba* pHba)
2708 {
2709 	if (adpt_i2o_systab_send(pHba) < 0)
2710 		return -1;
2711 	/* In READY state */
2712 
2713 	if (adpt_i2o_enable_hba(pHba) < 0)
2714 		return -1;
2715 
2716 	/* In OPERATIONAL state  */
2717 	return 0;
2718 }
2719 
2720 static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
2721 {
2722 	u32 __iomem *msg;
2723 	ulong timeout = jiffies + 5*HZ;
2724 
2725 	while(m == EMPTY_QUEUE){
2726 		rmb();
2727 		m = readl(pHba->post_port);
2728 		if(m != EMPTY_QUEUE){
2729 			break;
2730 		}
2731 		if(time_after(jiffies,timeout)){
2732 			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2733 			return 2;
2734 		}
2735 		schedule_timeout_uninterruptible(1);
2736 	}
2737 	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2738 	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2739 	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2740 	writel( 0,&msg[2]);
2741 	wmb();
2742 
2743 	writel(m, pHba->post_port);
2744 	wmb();
2745 	return 0;
2746 }
2747 
2748 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2749 {
2750 	u8 *status;
2751 	dma_addr_t addr;
2752 	u32 __iomem *msg = NULL;
2753 	int i;
2754 	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2755 	u32 m;
2756 
2757 	do {
2758 		rmb();
2759 		m = readl(pHba->post_port);
2760 		if (m != EMPTY_QUEUE) {
2761 			break;
2762 		}
2763 
2764 		if(time_after(jiffies,timeout)){
2765 			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2766 			return -ETIMEDOUT;
2767 		}
2768 		schedule_timeout_uninterruptible(1);
2769 	} while(m == EMPTY_QUEUE);
2770 
2771 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2772 
2773 	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2774 	if (!status) {
2775 		adpt_send_nop(pHba, m);
2776 		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2777 			pHba->name);
2778 		return -ENOMEM;
2779 	}
2780 
2781 	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2782 	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2783 	writel(0, &msg[2]);
2784 	writel(0x0106, &msg[3]);	/* Transaction context */
2785 	writel(4096, &msg[4]);		/* Host page frame size */
2786 	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
2787 	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
2788 	writel((u32)addr, &msg[7]);
2789 
2790 	writel(m, pHba->post_port);
2791 	wmb();
2792 
2793 	// Wait for the reply status to come back
2794 	do {
2795 		if (*status) {
2796 			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2797 				break;
2798 			}
2799 		}
2800 		rmb();
2801 		if(time_after(jiffies,timeout)){
2802 			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2803 			/* We lose 4 bytes of "status" here, but we
2804 			   cannot free them because the controller may
2805 			   wake up and write those bytes at any time */
2806 			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2807 			return -ETIMEDOUT;
2808 		}
2809 		schedule_timeout_uninterruptible(1);
2810 	} while (1);
2811 
2812 	// If the command was successful, fill the fifo with our reply
2813 	// message packets
2814 	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2815 		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2816 		return -2;
2817 	}
2818 	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2819 
2820 	if(pHba->reply_pool != NULL) {
2821 		dma_free_coherent(&pHba->pDev->dev,
2822 			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2823 			pHba->reply_pool, pHba->reply_pool_pa);
2824 	}
2825 
2826 	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2827 				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2828 				&pHba->reply_pool_pa, GFP_KERNEL);
2829 	if (!pHba->reply_pool) {
2830 		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2831 		return -ENOMEM;
2832 	}
2833 
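	/*
	 * Prime the outbound FIFO: each write below hands the IOP the bus
	 * address of one empty reply frame.  adpt_isr() recycles frames the
	 * same way once it has processed them (writel(m, pHba->reply_port)).
	 */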
2834 	for(i = 0; i < pHba->reply_fifo_size; i++) {
2835 		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2836 			pHba->reply_port);
2837 		wmb();
2838 	}
2839 	adpt_i2o_status_get(pHba);
2840 	return 0;
2841 }
2842 
2843 
2844 /*
2845  * I2O System Table.  Contains information about
2846  * all the IOPs in the system.  Used to inform IOPs
2847  * about each other's existence.
2848  *
2849  * sys_tbl_ver is the CurrentChangeIndicator that is
2850  * used by IOPs to track changes.
2851  */
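/*
 * As consumed by adpt_i2o_build_sys_table() below: a short header
 * (num_entries, version, change_ind) followed by one i2o_sys_tbl_entry
 * per IOP giving its identity, state, inbound frame size and the bus
 * address of its inbound post FIFO (base_addr_phys + 0x40).
 */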
2852 
2853 
2854 
2855 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2856 {
2857 	ulong timeout;
2858 	u32 m;
2859 	u32 __iomem *msg;
2860 	u8 *status_block=NULL;
2861 
2862 	if(pHba->status_block == NULL) {
2863 		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2864 					sizeof(i2o_status_block),
2865 					&pHba->status_block_pa, GFP_KERNEL);
2866 		if(pHba->status_block == NULL) {
2867 			printk(KERN_ERR
2868 			"dpti%d: Get Status Block failed; Out of memory. \n",
2869 			pHba->unit);
2870 			return -ENOMEM;
2871 		}
2872 	}
2873 	memset(pHba->status_block, 0, sizeof(i2o_status_block));
2874 	status_block = (u8*)(pHba->status_block);
2875 	timeout = jiffies+TMOUT_GETSTATUS*HZ;
2876 	do {
2877 		rmb();
2878 		m = readl(pHba->post_port);
2879 		if (m != EMPTY_QUEUE) {
2880 			break;
2881 		}
2882 		if(time_after(jiffies,timeout)){
2883 			printk(KERN_ERR "%s: Timeout waiting for message !\n",
2884 					pHba->name);
2885 			return -ETIMEDOUT;
2886 		}
2887 		schedule_timeout_uninterruptible(1);
2888 	} while(m==EMPTY_QUEUE);
2889 
2890 
2891 	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2892 
2893 	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2894 	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2895 	writel(1, &msg[2]);
2896 	writel(0, &msg[3]);
2897 	writel(0, &msg[4]);
2898 	writel(0, &msg[5]);
2899 	writel( dma_low(pHba->status_block_pa), &msg[6]);
2900 	writel( dma_high(pHba->status_block_pa), &msg[7]);
2901 	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2902 
2903 	//post message
2904 	writel(m, pHba->post_port);
2905 	wmb();
2906 
2907 	while(status_block[87]!=0xff){
2908 		if(time_after(jiffies,timeout)){
2909 			printk(KERN_ERR"dpti%d: Get status timeout.\n",
2910 				pHba->unit);
2911 			return -ETIMEDOUT;
2912 		}
2913 		rmb();
2914 		schedule_timeout_uninterruptible(1);
2915 	}
2916 
2917 	// Set up our number of outbound and inbound messages
2918 	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2919 	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2920 		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2921 	}
2922 
2923 	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2924 	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2925 		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2926 	}
2927 
2928 	// Calculate the Scatter Gather list size
2929 	if (dpt_dma64(pHba)) {
2930 		pHba->sg_tablesize
2931 		  = ((pHba->status_block->inbound_frame_size * 4
2932 		  - 14 * sizeof(u32))
2933 		  / (sizeof(struct sg_simple_element) + sizeof(u32)));
2934 	} else {
2935 		pHba->sg_tablesize
2936 		  = ((pHba->status_block->inbound_frame_size * 4
2937 		  - 12 * sizeof(u32))
2938 		  / sizeof(struct sg_simple_element));
2939 	}
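	/*
	 * Worked example, for illustration: a 32-word (128-byte) inbound
	 * frame with 32-bit DMA yields (128 - 48) / 8 = 10 SG elements per
	 * message frame.
	 */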
2940 	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2941 		pHba->sg_tablesize = SG_LIST_ELEMENTS;
2942 	}
2943 
2944 
2945 #ifdef DEBUG
2946 	printk("dpti%d: State = ",pHba->unit);
2947 	switch(pHba->status_block->iop_state) {
2948 		case 0x01:
2949 			printk("INIT\n");
2950 			break;
2951 		case 0x02:
2952 			printk("RESET\n");
2953 			break;
2954 		case 0x04:
2955 			printk("HOLD\n");
2956 			break;
2957 		case 0x05:
2958 			printk("READY\n");
2959 			break;
2960 		case 0x08:
2961 			printk("OPERATIONAL\n");
2962 			break;
2963 		case 0x10:
2964 			printk("FAILED\n");
2965 			break;
2966 		case 0x11:
2967 			printk("FAULTED\n");
2968 			break;
2969 		default:
2970 			printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2971 	}
2972 #endif
2973 	return 0;
2974 }
2975 
2976 /*
2977  * Get the IOP's Logical Configuration Table
2978  */
2979 static int adpt_i2o_lct_get(adpt_hba* pHba)
2980 {
2981 	u32 msg[8];
2982 	int ret;
2983 	u32 buf[16];
2984 
2985 	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2986 		pHba->lct_size = pHba->status_block->expected_lct_size;
2987 	}
2988 	do {
2989 		if (pHba->lct == NULL) {
2990 			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2991 					pHba->lct_size, &pHba->lct_pa,
2992 					GFP_ATOMIC);
2993 			if(pHba->lct == NULL) {
2994 				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
2995 					pHba->name);
2996 				return -ENOMEM;
2997 			}
2998 		}
2999 		memset(pHba->lct, 0, pHba->lct_size);
3000 
3001 		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3002 		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3003 		msg[2] = 0;
3004 		msg[3] = 0;
3005 		msg[4] = 0xFFFFFFFF;	/* All devices */
3006 		msg[5] = 0x00000000;	/* Report now */
3007 		msg[6] = 0xD0000000|pHba->lct_size;
3008 		msg[7] = (u32)pHba->lct_pa;
3009 
3010 		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3011 			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3012 				pHba->name, ret);
3013 			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3014 			return ret;
3015 		}
3016 
3017 		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3018 			pHba->lct_size = pHba->lct->table_size << 2;
3019 			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3020 					pHba->lct, pHba->lct_pa);
3021 			pHba->lct = NULL;
3022 		}
3023 	} while (pHba->lct == NULL);
3024 
3025 	PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3026 
3027 
3028 	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3029 	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3030 		pHba->FwDebugBufferSize = buf[1];
3031 		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3032 						pHba->FwDebugBufferSize);
3033 		if (pHba->FwDebugBuffer_P) {
3034 			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
3035 							FW_DEBUG_FLAGS_OFFSET;
3036 			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3037 							FW_DEBUG_BLED_OFFSET;
3038 			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
3039 			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3040 						FW_DEBUG_STR_LENGTH_OFFSET;
3041 			pHba->FwDebugBuffer_P += buf[2];
3042 			pHba->FwDebugFlags = 0;
3043 		}
3044 	}
3045 
3046 	return 0;
3047 }
3048 
3049 static int adpt_i2o_build_sys_table(void)
3050 {
3051 	adpt_hba* pHba = hba_chain;
3052 	int count = 0;
3053 
3054 	if (sys_tbl)
3055 		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3056 					sys_tbl, sys_tbl_pa);
3057 
3058 	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
3059 				(hba_count) * sizeof(struct i2o_sys_tbl_entry);
3060 
3061 	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3062 				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3063 	if (!sys_tbl) {
3064 		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3065 		return -ENOMEM;
3066 	}
3067 
3068 	sys_tbl->num_entries = hba_count;
3069 	sys_tbl->version = I2OVERSION;
3070 	sys_tbl->change_ind = sys_tbl_ind++;
3071 
3072 	for(pHba = hba_chain; pHba; pHba = pHba->next) {
3073 		u64 addr;
3074 		// Get updated Status Block so we have the latest information
3075 		if (adpt_i2o_status_get(pHba)) {
3076 			sys_tbl->num_entries--;
3077 			continue; // try next one
3078 		}
3079 
3080 		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3081 		sys_tbl->iops[count].iop_id = pHba->unit + 2;
3082 		sys_tbl->iops[count].seg_num = 0;
3083 		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3084 		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3085 		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3086 		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3087 		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3088 		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3089 		addr = pHba->base_addr_phys + 0x40;
3090 		sys_tbl->iops[count].inbound_low = dma_low(addr);
3091 		sys_tbl->iops[count].inbound_high = dma_high(addr);
3092 
3093 		count++;
3094 	}
3095 
3096 #ifdef DEBUG
3097 {
3098 	u32 *table = (u32*)sys_tbl;
3099 	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3100 	for(count = 0; count < (sys_tbl_len >>2); count++) {
3101 		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3102 			count, table[count]);
3103 	}
3104 }
3105 #endif
3106 
3107 	return 0;
3108 }
3109 
3110 
3111 /*
3112  *	 Dump the information block associated with a given unit (TID)
3113  */
3114 
3115 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3116 {
3117 	char buf[64];
3118 	int unit = d->lct_data.tid;
3119 
3120 	printk(KERN_INFO "TID %3.3d ", unit);
3121 
3122 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3123 	{
3124 		buf[16]=0;
3125 		printk(" Vendor: %-12.12s", buf);
3126 	}
3127 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3128 	{
3129 		buf[16]=0;
3130 		printk(" Device: %-12.12s", buf);
3131 	}
3132 	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3133 	{
3134 		buf[8]=0;
3135 		printk(" Rev: %-12.12s\n", buf);
3136 	}
3137 #ifdef DEBUG
3138 	 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3139 	 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3140 	 printk(KERN_INFO "\tFlags: ");
3141 
3142 	 if(d->lct_data.device_flags&(1<<0))
3143 		  printk("C");	     // ConfigDialog requested
3144 	 if(d->lct_data.device_flags&(1<<1))
3145 		  printk("U");	     // Multi-user capable
3146 	 if(!(d->lct_data.device_flags&(1<<4)))
3147 		  printk("P");	     // Peer service enabled!
3148 	 if(!(d->lct_data.device_flags&(1<<5)))
3149 		  printk("M");	     // Mgmt service enabled!
3150 	 printk("\n");
3151 #endif
3152 }
3153 
3154 #ifdef DEBUG
3155 /*
3156  *	Do i2o class name lookup
3157  */
3158 static const char *adpt_i2o_get_class_name(int class)
3159 {
3160 	int idx = 16;
3161 	static char *i2o_class_name[] = {
3162 		"Executive",
3163 		"Device Driver Module",
3164 		"Block Device",
3165 		"Tape Device",
3166 		"LAN Interface",
3167 		"WAN Interface",
3168 		"Fibre Channel Port",
3169 		"Fibre Channel Device",
3170 		"SCSI Device",
3171 		"ATE Port",
3172 		"ATE Device",
3173 		"Floppy Controller",
3174 		"Floppy Device",
3175 		"Secondary Bus Port",
3176 		"Peer Transport Agent",
3177 		"Peer Transport",
3178 		"Unknown"
3179 	};
3180 
3181 	switch(class&0xFFF) {
3182 	case I2O_CLASS_EXECUTIVE:
3183 		idx = 0; break;
3184 	case I2O_CLASS_DDM:
3185 		idx = 1; break;
3186 	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3187 		idx = 2; break;
3188 	case I2O_CLASS_SEQUENTIAL_STORAGE:
3189 		idx = 3; break;
3190 	case I2O_CLASS_LAN:
3191 		idx = 4; break;
3192 	case I2O_CLASS_WAN:
3193 		idx = 5; break;
3194 	case I2O_CLASS_FIBRE_CHANNEL_PORT:
3195 		idx = 6; break;
3196 	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3197 		idx = 7; break;
3198 	case I2O_CLASS_SCSI_PERIPHERAL:
3199 		idx = 8; break;
3200 	case I2O_CLASS_ATE_PORT:
3201 		idx = 9; break;
3202 	case I2O_CLASS_ATE_PERIPHERAL:
3203 		idx = 10; break;
3204 	case I2O_CLASS_FLOPPY_CONTROLLER:
3205 		idx = 11; break;
3206 	case I2O_CLASS_FLOPPY_DEVICE:
3207 		idx = 12; break;
3208 	case I2O_CLASS_BUS_ADAPTER_PORT:
3209 		idx = 13; break;
3210 	case I2O_CLASS_PEER_TRANSPORT_AGENT:
3211 		idx = 14; break;
3212 	case I2O_CLASS_PEER_TRANSPORT:
3213 		idx = 15; break;
3214 	}
3215 	return i2o_class_name[idx];
3216 }
3217 #endif
3218 
3219 
3220 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3221 {
3222 	u32 msg[6];
3223 	int ret, size = sizeof(i2o_hrt);
3224 
3225 	do {
3226 		if (pHba->hrt == NULL) {
3227 			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3228 					size, &pHba->hrt_pa, GFP_KERNEL);
3229 			if (pHba->hrt == NULL) {
3230 				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3231 				return -ENOMEM;
3232 			}
3233 		}
3234 
3235 		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3236 		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3237 		msg[2]= 0;
3238 		msg[3]= 0;
3239 		msg[4]= (0xD0000000 | size);    /* Simple transaction */
3240 		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */
3241 
3242 		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3243 			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3244 			return ret;
3245 		}
3246 
3247 		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3248 			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3249 			dma_free_coherent(&pHba->pDev->dev, size,
3250 				pHba->hrt, pHba->hrt_pa);
3251 			size = newsize;
3252 			pHba->hrt = NULL;
3253 		}
3254 	} while(pHba->hrt == NULL);
3255 	return 0;
3256 }
3257 
3258 /*
3259  *	 Query one scalar group value or a whole scalar group.
3260  */
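/*
 * Passing field == -1 requests the whole group; see
 * adpt_i2o_report_hba_unit() for single-field reads such as
 * adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16).
 */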
3261 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3262 			int group, int field, void *buf, int buflen)
3263 {
3264 	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3265 	u8 *opblk_va;
3266 	dma_addr_t opblk_pa;
3267 	u8 *resblk_va;
3268 	dma_addr_t resblk_pa;
3269 
3270 	int size;
3271 
3272 	/* 8 bytes for header */
3273 	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3274 			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3275 	if (resblk_va == NULL) {
3276 		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3277 		return -ENOMEM;
3278 	}
3279 
3280 	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3281 			sizeof(opblk), &opblk_pa, GFP_KERNEL);
3282 	if (opblk_va == NULL) {
3283 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3284 			resblk_va, resblk_pa);
3285 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3286 			pHba->name);
3287 		return -ENOMEM;
3288 	}
3289 	if (field == -1)		/* whole group */
3290 		opblk[4] = -1;
3291 
3292 	memcpy(opblk_va, opblk, sizeof(opblk));
3293 	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3294 		opblk_va, opblk_pa, sizeof(opblk),
3295 		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3296 	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3297 	if (size == -ETIME) {
3298 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3299 							resblk_va, resblk_pa);
3300 		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3301 		return -ETIME;
3302 	} else if (size == -EINTR) {
3303 		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3304 							resblk_va, resblk_pa);
3305 		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3306 		return -EINTR;
3307 	}
3308 
3309 	memcpy(buf, resblk_va+8, buflen);  /* cut off header */
3310 
3311 	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3312 						resblk_va, resblk_pa);
3313 	if (size < 0)
3314 		return size;
3315 
3316 	return buflen;
3317 }
3318 
3319 
3320 /*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3321  *
3322  *	This function can be used for all UtilParamsGet/Set operations.
3323  *	The OperationBlock is given in opblk-buffer,
3324  *	and results are returned in resblk-buffer.
3325  *	Note that the minimum sized resblk is 8 bytes and contains
3326  *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3327  */
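/*
 * Illustrative example: the single-field read built by
 * adpt_i2o_query_scalar() for group 0xF100, field 3 sends the
 * OperationBlock { 1, 0, I2O_PARAMS_FIELD_GET, 0xF100, 1, 3 }, and the
 * ResultBlock comes back as the four u16 header fields named above
 * followed by the data, which is why callers skip the first 8 bytes.
 */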
3328 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3329 		  void *opblk_va,  dma_addr_t opblk_pa, int oplen,
3330 		void *resblk_va, dma_addr_t resblk_pa, int reslen)
3331 {
3332 	u32 msg[9];
3333 	u32 *res = (u32 *)resblk_va;
3334 	int wait_status;
3335 
3336 	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3337 	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3338 	msg[2] = 0;
3339 	msg[3] = 0;
3340 	msg[4] = 0;
3341 	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
3342 	msg[6] = (u32)opblk_pa;
3343 	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
3344 	msg[8] = (u32)resblk_pa;
3345 
3346 	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3347 		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3348 		return wait_status;	/* -DetailedStatus */
3349 	}
3350 
3351 	if (res[1]&0x00FF0000) { 	/* BlockStatus != SUCCESS */
3352 		printk(KERN_WARNING "%s: %s - Error:\n  ErrorInfoSize = 0x%02x, "
3353 			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3354 			pHba->name,
3355 			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3356 							 : "PARAMS_GET",
3357 			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3358 		return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3359 	}
3360 
3361 	return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3362 }
3363 
3364 
3365 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3366 {
3367 	u32 msg[4];
3368 	int ret;
3369 
3370 	adpt_i2o_status_get(pHba);
3371 
3372 	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3373 
3374 	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3375    	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3376 		return 0;
3377 	}
3378 
3379 	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3380 	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3381 	msg[2] = 0;
3382 	msg[3] = 0;
3383 
3384 	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3385 		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3386 				pHba->unit, -ret);
3387 	} else {
3388 		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3389 	}
3390 
3391 	adpt_i2o_status_get(pHba);
3392 	return ret;
3393 }
3394 
3395 
3396 /*
3397  * Enable IOP. Allows the IOP to resume external operations.
3398  */
3399 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3400 {
3401 	u32 msg[4];
3402 	int ret;
3403 
3404 	adpt_i2o_status_get(pHba);
3405 	if(!pHba->status_block){
3406 		return -ENOMEM;
3407 	}
3408 	/* Enable only allowed on READY state */
3409 	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3410 		return 0;
3411 
3412 	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3413 		return -EINVAL;
3414 
3415 	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3416 	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3417 	msg[2]= 0;
3418 	msg[3]= 0;
3419 
3420 	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3421 		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3422 			pHba->name, ret);
3423 	} else {
3424 		PDEBUG("%s: Enabled.\n", pHba->name);
3425 	}
3426 
3427 	adpt_i2o_status_get(pHba);
3428 	return ret;
3429 }
3430 
3431 
3432 static int adpt_i2o_systab_send(adpt_hba* pHba)
3433 {
3434 	u32 msg[12];
3435 	int ret;
3436 
3437 	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3438 	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3439 	msg[2] = 0;
3440 	msg[3] = 0;
3441 	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3442 	msg[5] = 0;				   /* Segment 0 */
3443 
3444 	/*
3445 	 * Provide three SGL-elements:
3446 	 * System table (SysTab), Private memory space declaration and
3447 	 * Private i/o space declaration
3448 	 */
3449 	msg[6] = 0x54000000 | sys_tbl_len;
3450 	msg[7] = (u32)sys_tbl_pa;
3451 	msg[8] = 0x54000000 | 0;
3452 	msg[9] = 0;
3453 	msg[10] = 0xD4000000 | 0;
3454 	msg[11] = 0;
3455 
3456 	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3457 		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3458 			pHba->name, ret);
3459 	}
3460 #ifdef DEBUG
3461 	else {
3462 		PINFO("%s: SysTab set.\n", pHba->name);
3463 	}
3464 #endif
3465 
3466 	return ret;
3467 }
3468 
3469 
3470 /*============================================================================
3471  *
3472  *============================================================================
3473  */
3474 
3475 
3476 #ifdef UARTDELAY
3477 
3478 static void adpt_delay(int millisec)
3479 {
3480 	int i;
3481 	for (i = 0; i < millisec; i++) {
3482 		udelay(1000);	/* delay for one millisecond */
3483 	}
3484 }
3485 
3486 #endif
3487 
3488 static struct scsi_host_template driver_template = {
3489 	.module			= THIS_MODULE,
3490 	.name			= "dpt_i2o",
3491 	.proc_name		= "dpt_i2o",
3492 	.show_info		= adpt_show_info,
3493 	.info			= adpt_info,
3494 	.queuecommand		= adpt_queue,
3495 	.eh_abort_handler	= adpt_abort,
3496 	.eh_device_reset_handler = adpt_device_reset,
3497 	.eh_bus_reset_handler	= adpt_bus_reset,
3498 	.eh_host_reset_handler	= adpt_reset,
3499 	.bios_param		= adpt_bios_param,
3500 	.slave_configure	= adpt_slave_configure,
3501 	.can_queue		= MAX_TO_IOP_MESSAGES,
3502 	.this_id		= 7,
3503 };
3504 
3505 static int __init adpt_init(void)
3506 {
3507 	int		error;
3508 	adpt_hba	*pHba, *next;
3509 
3510 	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3511 
3512 	error = adpt_detect(&driver_template);
3513 	if (error < 0)
3514 		return error;
3515 	if (hba_chain == NULL)
3516 		return -ENODEV;
3517 
3518 	for (pHba = hba_chain; pHba; pHba = pHba->next) {
3519 		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3520 		if (error)
3521 			goto fail;
3522 		scsi_scan_host(pHba->host);
3523 	}
3524 	return 0;
3525 fail:
3526 	for (pHba = hba_chain; pHba; pHba = next) {
3527 		next = pHba->next;
3528 		scsi_remove_host(pHba->host);
3529 	}
3530 	return error;
3531 }
3532 
3533 static void __exit adpt_exit(void)
3534 {
3535 	adpt_hba	*pHba, *next;
3536 
3537 	for (pHba = hba_chain; pHba; pHba = next) {
3538 		next = pHba->next;
3539 		adpt_release(pHba);
3540 	}
3541 }
3542 
3543 module_init(adpt_init);
3544 module_exit(adpt_exit);
3545 
3546 MODULE_LICENSE("GPL");
3547