/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Nick Cheng
**   Description: SCSI RAID Device Driver for
**                ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

#define	ARCMSR_SLEEPTIME	10
#define	ARCMSR_RETRYCOUNT	12

wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
				ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler		= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
	.this_id			= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table			= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.shutdown		= arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/

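/* only type B adapters own a separate DMA-coherent MessageUnit_B;
   types A and C map their message units directly and free nothing */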
static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	case ACB_ADAPTER_TYPE_C:
		break;
	case ACB_ADAPTER_TYPE_B:{
		dma_free_coherent(&acb->pdev->dev,
			sizeof(struct MessageUnit_B),
			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
	}
	}
}

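/* map the controller register BARs: type A/B use BAR0 (type B also BAR2), type C uses BAR1 */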
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	}
	return true;
}

static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		iounmap(acb->pmuA);
	}
	break;
	case ACB_ADAPTER_TYPE_B:{
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
	}

	break;
	case ACB_ADAPTER_TYPE_C:{
		iounmap(acb->pmuC);
	}
	}
}

static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}

static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}

static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	u16 dev_id;
	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	acb->dev_id = dev_id;
	switch (dev_id) {
	case 0x1880: {
		acb->adapter_type = ACB_ADAPTER_TYPE_C;
		}
		break;
	case 0x1201: {
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		}
		break;

	default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}

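/* poll up to ~20 seconds (2000 x 10 ms) for the IOP's outbound message interrupt, then acknowledge it */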
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell)
			& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
					reg->iop2drv_doorbell);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
					reg->drv2iop_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&phbcmu->outbound_doorbell)
				& ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear); /*clear interrupt*/
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}

static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;
	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hba_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;
	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbb_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout,retry count down = %d \n", acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbc_wait_msgint_ready(pACB)) {
			break;
		} else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
			timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
		}
	} while (retry_count != 0);
	return;
}
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		arcmsr_flush_hba_cache(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_flush_hbb_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_flush_hbc_cache(acb);
		}
	}
}

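/* carve every CommandControlBlock out of one DMA-coherent allocation, 32-byte
   aligned so types A/B can post the frame address right-shifted by 5 bits */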
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize = 0, offset;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t  firm_config_version;
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if((firm_config_version & 0xFF) >= 3){
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);	
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if(!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error \n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
	dma_coherent_handle = dma_coherent_handle + offset;
	dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}

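/* bottom half for the "get config" message: diff the firmware's device map
   against the cached copy and hot-add or remove the affected SCSI devices */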
static void arcmsr_message_isr_bh_fn(struct work_struct *work) 
{
	struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
	switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {

			struct MessageUnit_A __iomem *reg  = acb->pmuA;
			char *acb_dev_map = (char *)acb->device_map;
			uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]);
			char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]);
			int target, lun;
			struct scsi_device *psdev;
			char diff;

			atomic_inc(&acb->rq_map_token);
			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
				for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
					diff = (*acb_dev_map)^readb(devicemap);
					if (diff != 0) {
						char temp;
						*acb_dev_map = readb(devicemap);
						temp =*acb_dev_map;
						for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
							if((temp & 0x01)==1 && (diff & 0x01) == 1) {
								scsi_add_device(acb->host, 0, target, lun);
							}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
								psdev = scsi_device_lookup(acb->host, 0, target, lun);
								if (psdev != NULL ) {
									scsi_remove_device(psdev);
									scsi_device_put(psdev);
								}
							}
							temp >>= 1;
							diff >>= 1;
						}
					}
					devicemap++;
					acb_dev_map++;
				}
			}
			break;
		}

		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg  = acb->pmuB;
			char *acb_dev_map = (char *)acb->device_map;
			uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
			char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
			int target, lun;
			struct scsi_device *psdev;
			char diff;

			atomic_inc(&acb->rq_map_token);
			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
				for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
					diff = (*acb_dev_map)^readb(devicemap);
					if (diff != 0) {
						char temp;
						*acb_dev_map = readb(devicemap);
						temp =*acb_dev_map;
						for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
							if((temp & 0x01)==1 && (diff & 0x01) == 1) {	
								scsi_add_device(acb->host, 0, target, lun);
							}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
								psdev = scsi_device_lookup(acb->host, 0, target, lun);
								if (psdev != NULL ) {
									scsi_remove_device(psdev);
									scsi_device_put(psdev);
								}
							}
							temp >>= 1;
							diff >>= 1;
						}
					}
					devicemap++;
					acb_dev_map++;
				}
			}
		}
		break;
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C *reg  = acb->pmuC;
			char *acb_dev_map = (char *)acb->device_map;
			uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
			char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
			int target, lun;
			struct scsi_device *psdev;
			char diff;

			atomic_inc(&acb->rq_map_token);
			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
				for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
					diff = (*acb_dev_map)^readb(devicemap);
					if (diff != 0) {
						char temp;
						*acb_dev_map = readb(devicemap);
						temp = *acb_dev_map;
						for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
							if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
								scsi_add_device(acb->host, 0, target, lun);
							} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
								psdev = scsi_device_lookup(acb->host, 0, target, lun);
								if (psdev != NULL) {
									scsi_remove_device(psdev);
									scsi_device_put(psdev);
								}
							}
							temp >>= 1;
							diff >>= 1;
						}
					}
					devicemap++;
					acb_dev_map++;
				}
			}
		}
	}
}

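/* PCI probe: enable the device, map registers, read the firmware spec,
   allocate the CCB pool, register the SCSI host and start the device-map timer */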
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus,dev_fun;
	int error;
	error = pci_enable_device(pdev);
	if(error){
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if(!host){
		goto pci_disable_dev;
	}
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if(error){
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if(error){
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb,0,sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
	host->can_queue = ARCMSR_MAX_FREECCB_NUM;	/* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if(error){
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	arcmsr_define_adapter_type(acb);
	error = arcmsr_remap_pciregion(acb);
	if(!error){
		goto pci_release_regs;
	}
	error = arcmsr_get_firmware_spec(acb);
	if(!error){
		goto unmap_pci_region;
	}
	error = arcmsr_alloc_ccb_pool(acb);
	if(error){
		goto free_hbb_mu;
	}
	arcmsr_iop_init(acb);
	error = scsi_add_host(host, &pdev->dev);
	if(error){
		goto RAID_controller_stop;
	}
	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
	if(error){
		goto scsi_host_remove;
	}
	host->irq = pdev->irq;
	scsi_scan_host(host);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	if(arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	return 0;
out_free_sysfs:
scsi_host_remove:
	scsi_remove_host(host);
RAID_controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_ccb_pool(acb);
free_hbb_mu:
	arcmsr_free_hbb_mu(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}

static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->host->host_no);
		return false;
	}
	return true;
}
static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, pACB->host->host_no);
		return false;
	}
	return true;
}
static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		rtnval = arcmsr_abort_hba_allcmd(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		rtnval = arcmsr_abort_hbb_allcmd(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_C: {
		rtnval = arcmsr_abort_hbc_allcmd(acb);
		}
	}
	return rtnval;
}

static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
{
	struct MessageUnit_B *reg = pacb->pmuB;
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
		return false;
	}
	return true;
}

static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;
	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{

	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
			? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}

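/* mask every outbound interrupt source and return the previous mask so it can be restored later */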
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A : {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
						&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B : {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	}
	return orig_mask;
}

static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, 
			struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	}else{
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:

		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
				, acb->host->host_no
				, id
				, lun
				, ccb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_NO_CONNECT << 16;
				arcmsr_ccb_complete(ccb);
			break;
		}
	}
}

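/* complete one CCB reported by the IOP, with special handling for aborted or stale commands */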
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	int id, lun;
	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				id = abortcmd->device->id;
				lun = abortcmd->device->lun;
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
				acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
				done acb = '0x%p'"
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d \n"
				, acb->host->host_no
				, acb
				, pCCB
				, pCCB->acb
				, pCCB->startdone
				, atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}

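/* after an abort or reset, drain whatever is still queued in the adapter's outbound done queue */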
static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
					acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				writel(0, &reg->done_qbuffer[i]);
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		struct  ARCMSR_CDB *pARCMSR_CDB;
		uint32_t flag_ccb, ccb_cdb_phy;
		bool error;
		struct CommandControlBlock *pCCB;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct  ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
	}
	}
}
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;
	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	free_irq(pdev->irq, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	flush_work_sync(&acb->arcmsr_do_message_isr_bh);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
	int error = 0;
	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
						u32 intmask_org)
{
	u32 mask;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
	}
}

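/* translate a scsi_cmnd into an ARCMSR_CDB, building 32- or 64-bit SG entries in place */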
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;
	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->Context = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}

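/* hand a built CCB to the IOP through the adapter-type-specific inbound post queue */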
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
		else {
				writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
						 &reg->post_qbuffer[index]);
		} else {
			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		}
	}
}

static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->host->host_no);
	}
}

static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);

	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->host->host_no);
	}
}

static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, pACB->host->host_no);
	}
	return;
}
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_stop_hba_bgrb(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_stop_hbb_bgrb(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_stop_hbc_bgrb(acb);
		}
	}
}

static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}

void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}

static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	}
}

struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
	}
	return qbuffer;
}

static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B  *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
	}

	}
	return pqbuffer;
}

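/* copy message bytes the IOP just posted into the driver's receive ring, or flag an overflow */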
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *prbuffer;
	struct QBUFFER *pQbuffer;
	uint8_t __iomem *iop_data;
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
	rqbuf_lastindex = acb->rqbuf_lastindex;
	rqbuf_firstindex = acb->rqbuf_firstindex;
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = prbuffer->data_len;
	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);

	if (my_empty_len >= iop_len)
	{
		while (iop_len > 0) {
			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
			memcpy(pQbuffer, iop_data, 1);
			rqbuf_lastindex++;
			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		acb->rqbuf_lastindex = rqbuf_lastindex;
		arcmsr_iop_message_read(acb);
	}

	else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
}

static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		uint8_t *pQbuffer;
		struct QBUFFER __iomem *pwbuffer;
		uint8_t __iomem *iop_data;
		int32_t allxfer_len = 0;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;

		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
							(allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;

		arcmsr_iop_message_wrote(acb);
	}

	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
}

static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}

	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbc_message_isr(pACB);    /* messenger of "driver to iop commands" */
	}
	return;
}
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	index = reg->doneq_index;
	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}

static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	int error;

	phbcmu = (struct MessageUnit_C *)acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while (readl(&phbcmu->host_int_status) &
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		/* check if command done with no error */
		flag_ccb = readl(&phbcmu->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
			break;
		}
		throttling++;
	}
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A *reg  = acb->pmuA;
	/*clear interrupt and message state*/
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg  = acb->pmuB;

	/*clear interrupt and message state*/
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *reg  = acb->pmuC;
	/*clear interrupt and message state*/
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))	{
		return 1;
	}
	writel(outbound_intstatus, &reg->outbound_intstatus);
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)	{
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 	{
		/* messenger of "driver to iop commands" */
		arcmsr_hba_message_isr(acb);
	}
	return 0;
}

static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;
	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return 1;

	writel(~outbound_doorbell, reg->iop2drv_doorbell);
	/* in case the last doorbell-clear write is still posted, this read
	forces the hardware to complete the clear before we proceed */
	readl(reg->iop2drv_doorbell);
	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
		arcmsr_hbb_postqueue_isr(acb);
	}
	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
		/* messenger of "driver to iop commands" */
		arcmsr_hbb_message_isr(acb);
	}
	return 0;
}

static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status);
	if (!host_interrupt_status) {
		/* it must be a shared irq */
		return 1;
	}
	/* MU ioctl transfer doorbell interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
		arcmsr_hbc_doorbell_isr(pACB);   /* messenger of "ioctl message read write" */
	}
	/* MU post queue interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		arcmsr_hbc_postqueue_isr(pACB);  /* messenger of "scsi commands" */
	}
	return 0;
}
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (arcmsr_handle_hba_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		if (arcmsr_handle_hbb_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		if (arcmsr_handle_hbc_isr(acb)) {
			return IRQ_NONE;
		}
		}
	}
	return IRQ_HANDLED;
}

static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}
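/*
** Copy pending ioctl write data from the driver's circular wqbuffer to the
** IOP's write queue buffer, at most 124 bytes per transfer, then signal the
** IOP that a message has been written.
*/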

void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
	int32_t wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;
	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
	iop_data = (uint8_t __iomem *)pwbuffer->data;
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		wqbuf_firstindex = acb->wqbuf_firstindex;
		wqbuf_lastindex = acb->wqbuf_lastindex;
		while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			wqbuf_firstindex++;
			wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		acb->wqbuf_firstindex = wqbuf_firstindex;
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
}

static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
					struct scsi_cmnd *cmd)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	struct scatterlist *sg;
	uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
						(uint32_t ) cmd->cmnd[6] << 16 |
						(uint32_t ) cmd->cmnd[7] << 8  |
						(uint32_t ) cmd->cmnd[8];
						/* 4 bytes: Areca io control code */
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
	if (scsi_sg_count(cmd) > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;

	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch(controlcode) {

	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		unsigned char *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {

			struct QBUFFER __iomem *prbuffer;
			uint8_t __iomem *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = prbuffer->data;
			iop_len = readl(&prbuffer->data_len);
			while (iop_len > 0) {
				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
				acb->rqbuf_lastindex++;
				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		kfree(ver_addr);
		}
		break;

	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		unsigned char *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode = 
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode = 
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		ptmpuserbuffer = ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastindex = acb->wqbuf_lastindex;
		wqbuf_firstindex = acb->wqbuf_firstindex;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_post_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = 0x70;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
				&(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer =
					&acb->wqbuffer[acb->wqbuf_lastindex];
					memcpy(pQbuffer, ptmpuserbuffer, 1);
					acb->wqbuf_lastindex++;
					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* has error report sensedata */
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		kfree(ver_addr);
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
				ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
			| ACB_F_MESSAGE_WQBUFFER_READED);
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		}
		break;

	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_3F;
		}
		break;
		}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		const char *hello_string = "Hello! I am ARCMSR";
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
			, (int16_t)strlen(hello_string));
		}
		break;

	case ARCMSR_MESSAGE_SAY_GOODBYE:
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}
		arcmsr_iop_parking(acb);
		break;

	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}
		arcmsr_flush_adapter_cache(acb);
		break;

	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}
	message_out:
	sg = scsi_sglist(cmd);
	kunmap_atomic(buffer - sg->offset, KM_IRQ0);
	return retvalue;
}
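/* Pop one CommandControlBlock from the free list under ccblist_lock. */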

static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	}else{
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}
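/*
** Commands addressed to the virtual device (SCSI target 16) never reach a
** disk: INQUIRY is answered with a synthetic processor-type descriptor and
** WRITE_BUFFER/READ_BUFFER are routed to the ioctl message interface.
*/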

static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */

		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset, KM_IRQ0);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}

static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;
	uint8_t scsicmd = cmd->cmnd[0];
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
		if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
			cmd->result = (DID_NO_CONNECT << 16);
		}
		cmd->scsi_done(cmd);
		return 0;
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (atomic_read(&acb->ccboutstandingcount) >=
			ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)

static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
	int count;
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}

	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}

	count=16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n", 
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);
	acb->signature = readl(&reg->message_rwbuffer[0]);
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
	return true;
}
static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;
	/*firm_model,15,60-67*/
	char __iomem *iop_firm_version;
	/*firm_version,17,68-83*/
	char __iomem *iop_device_map;
	/*firm_version,21,84-99*/
	int count;
	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent){
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
		return false;
	}
	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
	reg = (struct MessageUnit_B *)dma_coherent;
	acb->pmuB = reg;
	reg->drv2iop_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
	reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
	reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
	reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
	reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer =  (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);	/*firm_model,15,60-67*/
	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);	/*firm_version,17,68-83*/
	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);	/*firm_version,21,84-99*/

	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}

	count = 16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);

	acb->signature = readl(&reg->message_rwbuffer[1]);
	/*firm_signature,1,00-03*/
	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
	/*firm_request_len,1,04-07*/
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
	/*firm_numbers_queue,2,08-11*/
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
	/*firm_sdram_size,3,12-15*/
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
	/*firm_ide_channels,4,16-19*/
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
	return true;
}

static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org, Index, firmware_state = 0;
	struct MessageUnit_C *reg = pACB->pmuC;
	char *acb_firm_model = pACB->firm_model;
	char *acb_firm_version = pACB->firm_version;
	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]);    /*firm_model,15,60-67*/
	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]);  /*firm_version,17,68-83*/
	int count;
	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	do {
		firmware_state = readl(&reg->outbound_msgaddr1);
	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	for (Index = 0; Index < 2000; Index++) {
		if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
			break;
		}
		udelay(10);
	} /* max ~20 ms */
	if (Index >= 2000) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware "
			"miscellaneous data' timeout\n", pACB->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		pACB->host->host_no,
		pACB->firm_version,
		pACB->firm_model);
	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);   /*firm_request_len,1,04-07*/
	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);    /*firm_sdram_size,3,12-15*/
	pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);  /*firm_ide_channels,4,16-19*/
	pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
	/*all interrupt service will be enable at arcmsr_iop_init*/
	return true;
}
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
		return arcmsr_get_hba_config(acb);
	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
		return arcmsr_get_hbb_config(acb);
	else
		return arcmsr_get_hbc_config(acb);
}
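/*
** Polled completion paths used by the abort and reset handlers: the
** outbound queue is drained without interrupts, sleeping 25 ms between
** empty reads and giving up after roughly 100 retries.
*/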

static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	polling_hbb_ccb_retry:

	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		} 
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
	uint32_t flag_ccb, ccb_cdb_phy;
	struct ARCMSR_CDB *arcmsr_cdb;
	bool error;
	struct CommandControlBlock *pCCB;
	uint32_t poll_ccb_done = 0, poll_count = 0;
	int rtn;
polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100) {
					rtn = FAILED;
					break;
				}
				goto polling_hbc_ccb_retry;
			}
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error */
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, pCCB->pcmd->device->id
					, pCCB->pcmd->device->lun
					, pCCB);
					pCCB->pcmd->result = DID_ABORT << 16;
					arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, pCCB
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
		}
	}
	return rtn;
}
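/*
** Hand the IOP the upper 32 bits of the CCB pool's DMA address (needed when
** the coherent buffer sits above 4 GB) and, for Type B adapters, program
** the post/done queue window through the message_rwbuffer handshake.
*/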

static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;
	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	dma_coherent_handle = acb->dma_coherent_handle;
	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	/*
	***********************************************************************
	**    if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;
			uint32_t intmask_org;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
						&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
							&reg->inbound_msgaddr0);
			if (!arcmsr_hba_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: set ccb high "
					"part physical address timeout\n",
					acb->host->host_no);
				return 1;
			}
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		unsigned long post_queue_phyaddr;
		uint32_t __iomem *rwbuffer;

		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t intmask_org;
		intmask_org = arcmsr_disable_outbound_ints(acb);
		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: can not set driver mode\n",
				acb->host->host_no);
			return 1;
		}
		post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4	 */
		writel(post_queue_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4	 */
		writel(post_queue_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4]*/
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
				"timeout\n", acb->host->host_no);
			return 1;
		}
		arcmsr_hbb_enable_driver_mode(acb);
		arcmsr_enable_outbound_ints(acb, intmask_org);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;

			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
					acb->adapter_index, cdb_phyaddr_hi32);
			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
					"timeout\n", acb->host->host_no);
				return 1;
			}
		}
		}
	}
	return 0;
}

static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		do {
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		do {
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		}
	}
}
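/*
** Periodic device-map poll driven by eternal_timer: a "get config" message
** is sent every 6 seconds so newly attached or removed drives are noticed.
** rq_map_token is decremented on each tick and refreshed when the firmware
** answers; if it reaches zero, or a bus reset/abort is in flight, the
** request is skipped and only the timer is re-armed.
*/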

static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
			atomic_set(&acb->rq_map_token, 16);
		}
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
	return;
}

static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B __iomem *reg = acb->pmuB;
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
			atomic_set(&acb->rq_map_token, 16);
		}
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
	return;
}

static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
		return;
	} else {
		acb->fw_flag = FW_NORMAL;
		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
			atomic_set(&acb->rq_map_token, 16);
		}
		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
		if (atomic_dec_and_test(&acb->rq_map_token)) {
			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
			return;
		}
		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
	return;
}

static void arcmsr_request_device_map(unsigned long pacb)
{
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
	switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			arcmsr_request_hba_device_map(acb);
		}
		break;
		case ACB_ADAPTER_TYPE_B: {
			arcmsr_request_hbb_device_map(acb);
		}
		break;
		case ACB_ADAPTER_TYPE_C: {
			arcmsr_request_hbc_device_map(acb);
		}
	}
}

static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}

static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}

static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", pACB->host->host_no);
	}
	return;
}
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_start_hba_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_start_hbb_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_start_hbc_bgrb(acb);
	}
}

static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/*clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear interrupt and message state*/
		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		uint32_t outbound_doorbell;
		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}

static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B:
		{
			struct MessageUnit_B *reg = acb->pmuB;
			writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
			if (!arcmsr_hbb_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
				return;
			}
		}
		break;
	case ACB_ADAPTER_TYPE_C:
		return;
	}
	return;
}
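/*
** Hard-reset the adapter: the first 64 bytes of PCI config space are saved,
** a controller-specific reset is triggered (magic register write for the
** ARC1680, an unlock sequence plus reset bit for the ARC1880, a config
** space poke otherwise), and the config space is restored once the chip
** has settled.
*/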

static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	u32 temp = 0;
	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if ((acb->dev_id == 0x1680)) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if ((acb->dev_id == 0x1880)) {
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while ((((temp = readl(&pmuC->host_diagnostic)) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;
	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/*start background rebuild*/
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue,outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
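/*
** Quiesce the IOP before a bus reset: with interrupts masked, ask the
** firmware to abort everything, drain the outbound queues, and return every
** in-flight CCB to the free list so ccboutstandingcount drops to zero.
*/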

static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i = 0;
	unsigned long flags;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				ccb->ccb_flags = 0;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
		return rtnval;
	}
	return rtnval;
}
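/*
** SCSI error-handler bus reset: if a reset is already in progress the
** caller simply waits for it to finish. Otherwise (Type A and C) the
** adapter is hardware-reset and the driver waits, ARCMSR_SLEEPTIME per try
** for up to ARCMSR_RETRYCOUNT tries, for the firmware-ready flag before
** re-initializing queues, timers and interrupts.
*/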

static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	uint32_t intmask_org, outbound_doorbell;
	int retry_count = 0;
	int rtn = FAILED;
	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	switch(acb->adapter_type){
		case ACB_ADAPTER_TYPE_A:{
			if (acb->acb_flags & ACB_F_BUS_RESET){
				long timeout;
				printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
				timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
				if (timeout) {
					return SUCCESS;
				}
			}
			acb->acb_flags |= ACB_F_BUS_RESET;
			if (!arcmsr_iop_reset(acb)) {
				struct MessageUnit_A __iomem *reg;
				reg = acb->pmuA;
				arcmsr_hardware_reset(acb);
				acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep_again:
				ssleep(ARCMSR_SLEEPTIME);
				if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
					if (retry_count > ARCMSR_RETRYCOUNT) {
						acb->fw_flag = FW_DEADLOCK;
						printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
						return FAILED;
					}
					retry_count++;
					goto sleep_again;
				}
				acb->acb_flags |= ACB_F_IOP_INITED;
				/* disable all outbound interrupt */
				intmask_org = arcmsr_disable_outbound_ints(acb);
				arcmsr_get_firmware_spec(acb);
				arcmsr_start_adapter_bgrb(acb);
				/* clear Qbuffer if door bell ringed */
				outbound_doorbell = readl(&reg->outbound_doorbell);
				writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
				/* enable outbound Post Queue,outbound doorbell Interrupt */
				arcmsr_enable_outbound_ints(acb, intmask_org);
				atomic_set(&acb->rq_map_token, 16);
				atomic_set(&acb->ante_token_value, 16);
				acb->fw_flag = FW_NORMAL;
				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				rtn = SUCCESS;
				printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
			} else {
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				atomic_set(&acb->rq_map_token, 16);
				atomic_set(&acb->ante_token_value, 16);
				acb->fw_flag = FW_NORMAL;
				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
				rtn = SUCCESS;
			}
			break;
		}
		case ACB_ADAPTER_TYPE_B:{
			acb->acb_flags |= ACB_F_BUS_RESET;
			if (!arcmsr_iop_reset(acb)) {
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				rtn = FAILED;
			} else {
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				atomic_set(&acb->rq_map_token, 16);
				atomic_set(&acb->ante_token_value, 16);
				acb->fw_flag = FW_NORMAL;
				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
				rtn = SUCCESS;
			}
			break;
		}
		case ACB_ADAPTER_TYPE_C:{
			if (acb->acb_flags & ACB_F_BUS_RESET) {
				long timeout;
				printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
				timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
				if (timeout) {
					return SUCCESS;
				}
			}
			acb->acb_flags |= ACB_F_BUS_RESET;
			if (!arcmsr_iop_reset(acb)) {
				struct MessageUnit_C __iomem *reg;
				reg = acb->pmuC;
				arcmsr_hardware_reset(acb);
				acb->acb_flags &= ~ACB_F_IOP_INITED;
sleep:
				ssleep(ARCMSR_SLEEPTIME);
				if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count);
					if (retry_count > ARCMSR_RETRYCOUNT) {
						acb->fw_flag = FW_DEADLOCK;
						printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no);
						return FAILED;
					}
					retry_count++;
					goto sleep;
				}
				acb->acb_flags |= ACB_F_IOP_INITED;
				/* disable all outbound interrupt */
				intmask_org = arcmsr_disable_outbound_ints(acb);
				arcmsr_get_firmware_spec(acb);
				arcmsr_start_adapter_bgrb(acb);
				/* clear Qbuffer if door bell ringed */
				outbound_doorbell = readl(&reg->outbound_doorbell);
				writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
				/* enable outbound Post Queue,outbound doorbell Interrupt */
				arcmsr_enable_outbound_ints(acb, intmask_org);
				atomic_set(&acb->rq_map_token, 16);
				atomic_set(&acb->ante_token_value, 16);
				acb->fw_flag = FW_NORMAL;
				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				rtn = SUCCESS;
				printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
			} else {
				acb->acb_flags &= ~ACB_F_BUS_RESET;
				atomic_set(&acb->rq_map_token, 16);
				atomic_set(&acb->ante_token_value, 16);
				acb->fw_flag = FW_NORMAL;
				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
				rtn = SUCCESS;
			}
			break;
		}
	}
	return rtn;
}

static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	int rtn;
	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}

static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
		acb->host->host_no, cmd->device->id, cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** the all interrupt service routine is locked
	** we need to handle it as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount))
		return rtn;

	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	return rtn;
}

static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;
	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
			type, raid6 ? "( RAID6 capable)" : "",
			ARCMSR_DRIVER_VERSION);
	return buf;
}