// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

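/*
 * Map the ATA command code in the host-to-device FIS to the SATA frame
 * protocol (FPDMA/PIO/DMA/non-data) that the HW protocol engine expects.
 */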
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h =  (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

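/*
 * IPTT allocation: commands carrying a scsi_cmnd reuse the block layer
 * request tag directly; internal commands take a free bit from the
 * reserved region of the bitmap at HISI_SAS_UNRESERVED_IPTT and above.
 */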
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				  struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
		struct hisi_sas_slot *slot,
		int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
			device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

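/*
 * Prepare a sas_task for delivery: map DMA, allocate an IPTT/slot, pick
 * the delivery queue (from the blk-mq hw queue for scsi commands), build
 * the command header and mark the slot ready. The caller kicks the DQ.
 */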
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr	*cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (task->uldd_task) {
		struct ata_queued_cmd *qc;

		if (dev_is_sata(device)) {
			qc = task->uldd_task;
			scmd = qc->scsicmd;
		} else {
			scmd = task->uldd_task;
		}
	}

	if (scmd) {
		unsigned int dq_index;
		u32 blk_tag;

		blk_tag = blk_mq_unique_tag(scmd->request);
		dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);

	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		/*
		 * For IOs from upper layer, it may already disable preempt
		 * in the IO path, if disable preempt again in down(),
		 * function schedule() will report schedule_bug(), so check
		 * preemptible() before goto down().
		 */
		if (!preemptible())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock(&dq->lock);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock(&dq->lock);
	}

	return rc;
}

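/*
 * Report an attached phy to libsas: send PHYE_OOB_DONE, refresh the
 * sas_phy link rates, and hand over the received identify frame (SAS)
 * or FIS (SATA) via PORTE_BYTES_DMAED.
 */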
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
	    !sas_phy->suspended) {
		dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
		return;
	}

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

#define HISI_SAS_DISK_RECOVER_CNT 3
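/*
 * Clear any stale state on a newly found device: issue TMF_CLEAR_TASK_SET
 * to SSP end devices; for SATA devices drop any previous STP-target
 * affiliation with a hard reset (when expander-attached) and then
 * softreset the disk.
 */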
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	if (phy->phy_attached)
		return;

	if (!timer_pending(&phy->timer)) {
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
			struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

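/*
 * Device removal: unless a controller reset is in progress, abort all
 * commands for the device in the HW, deregister it and clear its ITCT.
 * The device slot is only marked SAS_PHY_UNUSED if the ITCT was cleared.
 */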
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
			struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
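/*
 * Issue an internal TMF (SSP task management IU or SATA device-control
 * FIS) and wait for it to complete, retrying up to TASK_RETRY times with
 * a TASK_TIMEOUT-second timer per attempt.
 */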
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid free'ing task
					 * before using task in IO completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		     task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		      task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
			task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

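/*
 * Build a device-reset FIS with SRST set or cleared; used below to drive
 * the softreset sequence (SRST assert, then de-assert) on a SATA disk.
 */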
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}

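/*
 * After a controller reset the HW may assign different port IDs: refresh
 * each registered device's port ID from the current phy state and
 * re-program its ITCT.
 */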
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

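/*
 * Walk the phys using the phy state saved before the reset: ports that
 * were up and lead to an expander get a broadcast event so libsas
 * revalidates the domain; phys that did not come back are reported down.
 */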
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

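/*
 * Controller reset, phase one: take hisi_hba->sem, block further scsi
 * requests, wait for outstanding commands to drain and set
 * HISI_SAS_REJECT_CMD_BIT so new I/O is rejected while the HW is
 * re-initialised; hisi_sas_controller_reset_done() undoes all of this.
 */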
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	up(&hisi_hba->sem);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

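/*
 * libsas abort handler: for SSP, issue TMF_ABORT_TASK plus an internal
 * abort of the command in the HW; for SATA/STP, abort the whole device
 * and softreset the disk; for SMP, abort the command by its IPTT.
 */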
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

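	/*
	 * Use a hard reset unless this is a SATA disk that has already
	 * completed init, in which case a link reset is sufficient.
	 */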
	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on the caller to wait for the
		 * link to be ready; otherwise, unless the phy reset failed,
		 * delay to give the link time to come back up.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
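		/*
		 * SATA devices do not take the LU_RESET TMF; reset the local
		 * phy instead and, if that succeeds, complete the tasks
		 * still queued for the device.
		 */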
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf =  TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d] rc=%d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
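		/*
		 * Reset the nexus of every registered device; expanders
		 * themselves are skipped.
		 */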
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in the LUN, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in the LUN or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

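	/* Take the next delivery queue entry and queue the slot for delivery */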
	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				      abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * _hisi_sas_internal_task_abort - execute an internal abort command for
 * a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * If prep_abort is not implemented, this hw either does not support
	 * internal abort or does not need one. Return TMF_RESP_FUNC_FAILED
	 * and let the remaining steps proceed as if the internal abort had
	 * been executed and completed on the CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
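		/* trigger a debugfs dump so the timed-out abort can be analysed */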
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * sync irq so that the task cannot be freed
				 * while the IO completion path is still
				 * using it
				 */
				synchronize_irq(cq->irq_no);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
		task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
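			/*
			 * Issue the device-wide abort on every delivery
			 * queue whose completion queue irq can still be
			 * serviced by an online CPU.
			 */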
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
				reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port  = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};

void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);

		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
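	/*
	 * Carve the per-command slot buffers out of larger DMA blocks
	 * rather than allocating each buffer individually.
	 */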
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;

	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
					&hisi_hba->sata_breakpoint_dma,
					GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

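	/* Prefer 64-bit DMA and fall back to 32-bit if it is unavailable */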
	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}

int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
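	/*
	 * With a hw-private slot allocator the whole IPTT space can be
	 * exposed to the midlayer; otherwise only the unreserved range is
	 * used for scsi commands.
	 */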
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");

u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);

static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

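	/* Create the debugfs root only when enabled via the module parameter */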
	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);