// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

struct hisi_sas_internal_abort_data {
	bool rst_ha_timeout; /* reset the HA for timeout */
};

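/*
 * Translate the command in a host-to-device FIS into the SATA transport
 * protocol class (FPDMA/PIO/DMA/non-data) used when building the command.
 */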
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	__set_bit(slot_idx, bitmap);
}

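/*
 * SCSI commands reuse the block layer tag as the IPTT; only commands without
 * a struct scsi_cmnd (internal commands and TMFs) take a slot from the
 * reserved region at and above HISI_SAS_UNRESERVED_IPTT under hisi_hba->lock.
 */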
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmd_to_rq(scsi_cmnd)->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem) {
				if (task->task_proto & SAS_PROTOCOL_SSP)
					dma_unmap_sg(dev, task->scatter,
						     task->num_scatter,
						     task->data_dir);
				else
					dma_unmap_sg(dev, &task->smp_task.smp_req,
						     1, DMA_TO_DEVICE);
			}
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto) && n_elem) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			dma_unmap_sg(dev, &task->smp_task.smp_req,
				     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem = dma_map_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

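/*
 * Claim a delivery queue slot, link the slot to its device, build the
 * protocol-specific command header and then ring the delivery queue
 * doorbell via start_delivery().
 */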
static
void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
			   struct hisi_sas_slot *slot,
			   struct hisi_sas_dq *dq,
			   struct hisi_sas_device *sas_dev)
{
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue;
	struct sas_task *task = slot->task;
	int wr_q_index;

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];

	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		hisi_sas_task_prep_abort(hisi_hba, slot);
		break;
	default:
		return;
	}

	/* Make slot memories observable before marking as ready */
	smp_wmb();
	WRITE_ONCE(slot->ready, 1);

	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);
}

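/*
 * Main libsas task execution entry point: map DMA, pick a delivery queue
 * (the block layer hw queue for SCSI commands), allocate an IPTT slot and
 * hand the task to hisi_sas_task_deliver().
 */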
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	int n_elem = 0, n_elem_dif = 0;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq = NULL;
	struct hisi_sas_port *port;
	struct hisi_hba *hisi_hba;
	struct hisi_sas_slot *slot;
	struct device *dev;
	int rc;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV && !internal_abort)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
	case SAS_PROTOCOL_SMP:
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_STP_ALL:
		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
			if (!gfpflags_allow_blocking(gfp_flags))
				return -EINVAL;

			down(&hisi_hba->sem);
			up(&hisi_hba->sem);
		}

		if (DEV_IS_GONE(sas_dev)) {
			if (sas_dev)
				dev_info(dev, "task prep: device %d not ready\n",
					 sas_dev->device_id);
			else
				dev_info(dev, "task prep: device %016llx not ready\n",
					 SAS_ADDR(device->sas_addr));

			return -ECOMM;
		}

		port = to_hisi_sas_port(sas_port);
		if (!port->port_attached) {
			dev_info(dev, "task prep: %s port%d not attach device\n",
				 dev_is_sata(device) ? "SATA/STP" : "SAS",
				 device->port->id);

			return -ECOMM;
		}

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scmd = qc->scsicmd;
			} else {
				scmd = task->uldd_task;
			}
		}

		if (scmd) {
			unsigned int dq_index;
			u32 blk_tag;

			blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
			dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
			dq = &hisi_hba->dq[dq_index];
		} else {
			struct Scsi_Host *shost = hisi_hba->shost;
			struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
			int queue = qmap->mq_map[raw_smp_processor_id()];

			dq = &hisi_hba->dq[queue];
		}
		break;
	case SAS_PROTOCOL_INTERNAL_ABORT:
		if (!hisi_hba->hw->prep_abort)
			return TMF_RESP_FUNC_FAILED;

		if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
			return -EIO;

		hisi_hba = dev_to_hisi_hba(device);

		if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
			return -EINVAL;

		port = to_hisi_sas_port(sas_port);
		dq = &hisi_hba->dq[task->abort_task.qid];
		break;
	default:
		dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		return -EINVAL;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (!internal_abort && hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot = &hisi_hba->slot_info[rc];
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->task = task;
	slot->port = port;

	slot->tmf = task->tmf;
	slot->is_internal = !!task->tmf || internal_abort;

	/* protect task_prep and start_delivery sequence */
	hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem);
prep_out:
	dev_err(dev, "task exec: failed[%d]!\n", rc);
	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
				 gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	if (!phy->phy_attached)
		return;

	sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

static void hisi_sas_tmf_aborted(struct sas_task *task)
{
	struct hisi_sas_slot *slot = task->lldd_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;

	if (slot) {
		struct hisi_sas_cq *cq =
			   &hisi_hba->cq[slot->dlvry_queue];
		/*
		 * sync irq to avoid free'ing task
		 * before using task in IO completion
		 */
		synchronize_irq(cq->irq_no);
		slot->task = NULL;
	}
}

#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		while (retry-- > 0) {
			rc = sas_clear_task_set(device, lun.scsi_lun);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * If an expander is swapped when a SATA disk is attached then
		 * we should issue a hard reset to clear previous affiliation
		 * of STP target port, see SPL (chapter 6.19.4).
		 *
		 * However we don't need to issue a hard reset here for these
		 * reasons:
		 * a. When probing the device, libsas/libata already issues a
		 * hard reset in sas_probe_sata() -> ata_sas_async_probe().
		 * Note that in hisi_sas_debug_I_T_nexus_reset() we take care
		 * to issue a hard reset by checking the dev status (== INIT).
		 * b. When resetting the controller, this is simply unnecessary.
		 */
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

int hisi_sas_slave_alloc(struct scsi_device *sdev)
{
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	struct hisi_sas_device *sas_dev = ddev->lldd_dev;
	int rc;

	rc = sas_slave_alloc(sdev);
	if (rc)
		return rc;

	rc = hisi_sas_init_device(ddev);
	if (rc)
		return rc;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_alloc);

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		sas_dev->device_id, sas_dev->dev_type);

	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work_common(struct work_struct *work,
		enum hisi_sas_phy_event event)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[event]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	phy->wait_phyup_cnt = 0;
	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}

static void hisi_sas_phyup_work(struct work_struct *work)
{
	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static void hisi_sas_phyup_pm_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;

	hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
	pm_runtime_put_sync(dev);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
	[HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

#define HISI_SAS_WAIT_PHYUP_RETRIES	10

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;
	unsigned long flags;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	spin_lock_irqsave(&phy->lock, flags);
	if (phy->phy_attached) {
		spin_unlock_irqrestore(&phy->lock, flags);
		return;
	}

	if (!timer_pending(&phy->timer)) {
		if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
			phy->wait_phyup_cnt++;
			phy->timer.expires = jiffies +
					     HISI_SAS_WAIT_PHYUP_TIMEOUT;
			add_timer(&phy->timer);
			spin_unlock_irqrestore(&phy->lock, flags);
			return;
		}

		dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
			 phy_no, phy->wait_phyup_cnt);
		phy->wait_phyup_cnt = 0;
	}
	spin_unlock_irqrestore(&phy->lock, flags);
}

EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

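/*
 * Issue a device-scoped internal abort on every completion queue whose
 * interrupt can still be serviced by an online CPU.
 */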
static int
hisi_sas_internal_task_abort_dev(struct hisi_sas_device *sas_dev,
				 bool rst_ha_timeout)
{
	struct hisi_sas_internal_abort_data data = { rst_ha_timeout };
	struct domain_device *device = sas_dev->sas_device;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	int i, rc;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		const struct cpumask *mask = cq->irq_mask;

		if (mask && !cpumask_intersects(cpu_online_mask, mask))
			continue;
		rc = sas_execute_internal_abort_dev(device, i, &data);
		if (rc)
			return rc;
	}

	return 0;
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort_dev(sas_dev, true);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	DECLARE_COMPLETION_ONSTACK(completion);
	int phy_no = sas_phy->id;
	u8 sts = phy->phy_attached;
	int ret = 0;

	down(&hisi_hba->sem);
	phy->reset_completion = &completion;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		goto out;

	case PHY_FUNC_SET_LINK_RATE:
		ret = hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			goto out;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (sts && !wait_for_completion_timeout(&completion,
		HISI_SAS_WAIT_PHYUP_TIMEOUT)) {
		dev_warn(dev, "phy%d wait phyup timed out for func %d\n",
			 phy_no, func);
		if (phy->in_reset)
			ret = -ETIMEDOUT;
	}

out:
	phy->reset_completion = NULL;

	up(&hisi_hba->sem);
	return ret;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = sas_execute_ata_cmd(device, fis, -1);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, -1);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk %016llx de-reset failed\n",
					SAS_ADDR(device->sas_addr));
		}
	} else {
		dev_err(dev, "ata disk %016llx reset failed\n",
			SAS_ADDR(device->sas_addr));
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

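/*
 * After a controller reset, re-bind each registered device's port to a PHY
 * that came back up and refresh its ITCT entry; ports with no live PHY are
 * marked with id 0xff.
 */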
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		spin_lock(&sas_port->phy_list_lock);
		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}
		spin_unlock(&sas_port->phy_list_lock);

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD,
							GFP_KERNEL);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
		}
	}
	/*
	 * Ensure any bcast events are processed prior to calling async nexus
	 * reset calls from hisi_sas_clear_nexus_ha() ->
	 * hisi_sas_async_I_T_nexus_reset()
	 */
	sas_drain_work(sas_ha);
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;
	struct ata_link *link;
	u8 fis[20] = {0};
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		if (!(sas_port->phy_mask & BIT(i)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = sas_execute_ata_cmd(device, fis, i);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					i, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

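/*
 * Controller reset path: block and drain new I/O before the HW soft reset,
 * then bring the PHYs back up and rescan the topology afterwards.
 */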
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
{
	if (!hisi_hba->hw->soft_reset)
		return -1;

	down(&hisi_hba->sem);
	if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
		up(&hisi_hba->sem);
		return -1;
	}

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);

	return 0;
}

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
		return rc;
	}
	clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

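/*
 * Abort one task: SSP uses a TMF plus an internal abort of the IPTT,
 * SATA/STP falls back to a device-level internal abort and softreset,
 * and SMP aborts the single IPTT only.
 */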
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct hisi_sas_internal_abort_data internal_abort_data = { false };
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_slot *slot = task->lldd_task;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (slot && task->task_proto & SAS_PROTOCOL_SSP) {
		u16 tag = slot->idx;
		int rc2;

		rc = sas_abort_task(task, tag);
		rc2 = sas_execute_internal_abort_single(device, tag,
				slot->dlvry_queue, &internal_abort_data);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			struct ata_queued_cmd *qc = task->uldd_task;

			rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);

			/*
			 * If an ATA internal command times out in ATA EH, it
			 * need to execute soft reset, so check the scsicmd
			 */
			if ((sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR) &&
			    qc && qc->scsicmd) {
				hisi_sas_do_release_task(hisi_hba, task, slot);
				rc = TMF_RESP_FUNC_COMPLETE;
			} else {
				rc = hisi_sas_softreset_ata_disk(device);
			}
		}
	} else if (slot && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = sas_execute_internal_abort_single(device,
						       tag, slot->dlvry_queue,
						       &internal_abort_data);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = sas_abort_task_set(device, lun);
	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 1;
		spin_unlock_irqrestore(&phy->lock, flags);
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (rc == -ETIMEDOUT)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
		return rc;
	}

	if (rc)
		return rc;

	/* Remote phy */
	if (dev_is_sata(device)) {
		rc = sas_ata_wait_after_reset(device,
					HISI_SAS_WAIT_PHYUP_TIMEOUT);
	} else {
		msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_dev->dev_status == HISI_SAS_DEV_NCQ_ERR)
		sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) {
		struct sas_phy *local_phy;

		rc = hisi_sas_softreset_ata_disk(device);
		switch (rc) {
		case -ECOMM:
			rc = -ENODEV;
			break;
		case TMF_RESP_FUNC_FAILED:
		case -EMSGSIZE:
		case -EIO:
			local_phy = sas_get_local_phy(device);
			rc = sas_phy_enable(local_phy, 0);
			if (!rc) {
				local_phy->enabled = 0;
				dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n",
					SAS_ADDR(device->sas_addr), rc);
				rc = -ENODEV;
			}
			sas_put_local_phy(local_phy);
			break;
		default:
			break;
		}
	}

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort_dev(sas_dev, false);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		rc = sas_lu_reset(device, lun);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			     sas_dev->device_id, rc);
	return rc;
}

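/* Async wrapper so hisi_sas_clear_nexus_ha() can reset devices in parallel. */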
static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
{
	struct domain_device *device = data;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	int rc;

	rc = hisi_sas_debug_I_T_nexus_reset(device);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
			 SAS_ADDR(device->sas_addr), rc);
}

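/*
 * Reset the whole HA: run a synchronous controller reset, then reset the
 * I_T nexus of every directly-attached (non-expander) device in parallel
 * and release all outstanding tasks.
 */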
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i, ret;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done) {
		ret = TMF_RESP_FUNC_FAILED;
		goto out;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
				      device, &async);
	}

	async_synchronize_full_domain(&async);
	hisi_sas_release_tasks(hisi_hba);

	ret = TMF_RESP_FUNC_COMPLETE;
out:
	return ret;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

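/*
 * Timeout handler for internal abort commands: trigger a debugfs dump if
 * enabled, detach the timed-out slot, and optionally queue a host reset.
 * Returns true when the internal abort did not complete.
 */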
static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
					    void *data)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_internal_abort_data *timeout = data;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		pr_err("Internal abort: timeout %016llx\n",
		       SAS_ADDR(device->sas_addr));
	} else {
		struct hisi_sas_slot *slot = task->lldd_task;

		set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);

		if (slot) {
			struct hisi_sas_cq *cq =
				&hisi_hba->cq[slot->dlvry_queue];
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}

		if (timeout->rst_ha_timeout) {
			pr_err("Internal abort: timeout and not done %016llx. Queuing reset.\n",
			       SAS_ADDR(device->sas_addr));
			queue_work(hisi_hba->wq, &hisi_hba->rst_work);
		} else {
			pr_err("Internal abort: timeout and not done %016llx.\n",
			       SAS_ADDR(device->sas_addr));
		}

		return true;
	}

	return false;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
				reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

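/*
 * Handle a phy-down notification: if the phy is still ready, re-report the
 * attached device and re-form the port; otherwise report loss of signal to
 * libsas and tear down the phy/port state. Flutter events seen while the
 * controller or phy is resetting are ignored.
 */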
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
		       gfp_t gfp_flags)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_phy_bcast(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct sas_ha_struct *sha = &hisi_hba->sha;

	if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
		return;

	if (test_bit(SAS_HA_FROZEN, &sha->state))
		return;

	sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_bcast);

void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
	.lldd_tmf_aborted	= hisi_sas_tmf_aborted,
	.lldd_abort_timeout	= hisi_sas_internal_abort_timeout,
};

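/* Zero the delivery/completion queues and the other host DMA memories. */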
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

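/*
 * Allocate the per-HBA DMA memories (delivery/completion queues, ITCT, IOST,
 * breakpoint tables, slot buffers), the slot bookkeeping and the driver
 * workqueue. DMA buffers are device-managed (dmam_*/devm_*).
 */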
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	s = hisi_hba->slot_index_count = max_command_entries;
	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	if (hisi_sas_controller_prereset(hisi_hba))
		return;

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (hisi_sas_controller_prereset(rst->hisi_hba))
		goto rst_complete;

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
rst_complete:
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

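/*
 * Read controller properties (SAS address, syscon reset registers, reference
 * clock, phy and queue counts) from DT or ACPI firmware.
 */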
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}

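/*
 * Common platform-device probe path: allocate the Scsi_Host, hook the HA up
 * to libsas, register the host and initialise the hardware.
 */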
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_hw_init;

	scsi_scan_host(shost);

	return 0;

err_out_hw_init:
	sas_unregister_ha(sha);
err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	del_timer_sync(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT  "enabled"
bool hisi_sas_debugfs_enable = true;
u32 hisi_sas_debugfs_dump_count = 50;
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
u32 hisi_sas_debugfs_dump_count = 1;
#endif

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
		 "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");

EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);