Commit 17d0fbf4 authored by Linus Torvalds

Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Four fixes, three for edge conditions which don't occur very often.
  The lpfc fix mitigates memory exhaustion for some high CPU systems"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: lpfc: Mitigate high memory pre-allocation by SCSI-MQ
  scsi: ufs: Fix NULL pointer dereference in ufshcd_config_vreg_hpm()
  scsi: target: tcmu: avoid use-after-free after command timeout
  scsi: qla2xxx: Fix gnl.l memory leak on adapter init failure
@@ -824,6 +824,7 @@ struct lpfc_hba {
uint32_t cfg_cq_poll_threshold;
uint32_t cfg_cq_max_proc_limit;
uint32_t cfg_fcp_cpu_map;
uint32_t cfg_fcp_mq_threshold;
uint32_t cfg_hdw_queue;
uint32_t cfg_irq_chann;
uint32_t cfg_suppress_rsp;
......
@@ -5708,6 +5708,19 @@ LPFC_ATTR_RW(nvme_oas, 0, 0, 1,
LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
"Embed NVME Command in WQE");
/*
* lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues
* the driver will advertise it supports to the SCSI layer.
*
* 0 = Set nr_hw_queues by the number of CPUs or HW queues.
* 1,128 = Manually specify the maximum nr_hw_queue value to be set,
*
* Value range is [0,128]. Default value is 8.
*/
LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
"Set the number of SCSI Queues advertised");
/*
* lpfc_hdw_queue: Set the number of Hardware Queues the driver
* will advertise it supports to the NVME and SCSI layers. This also
@@ -6030,6 +6043,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_cq_poll_threshold,
&dev_attr_lpfc_cq_max_proc_limit,
&dev_attr_lpfc_fcp_cpu_map,
&dev_attr_lpfc_fcp_mq_threshold,
&dev_attr_lpfc_hdw_queue,
&dev_attr_lpfc_irq_chann,
&dev_attr_lpfc_suppress_rsp,
@@ -7112,6 +7126,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
/* Initialize first burst. Target vs Initiator are different. */
lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size);
lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold);
lpfc_hdw_queue_init(phba, lpfc_hdw_queue);
lpfc_irq_chann_init(phba, lpfc_irq_chann);
lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr);
......
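For reference, LPFC_ATTR_R also registers lpfc_fcp_mq_threshold as a module parameter and a read-only sysfs attribute, and lpfc_fcp_mq_threshold_init() enforces the documented range. The standalone sketch below is illustrative only, not the macro-generated driver code: names and logging are simplified, and the [0,128] range and default of 8 are taken from the comment above.

#include <stdio.h>

#define THRESHOLD_MIN 0    /* LPFC_FCP_MQ_THRESHOLD_MIN */
#define THRESHOLD_MAX 128  /* LPFC_FCP_MQ_THRESHOLD_MAX */
#define THRESHOLD_DEF 8    /* LPFC_FCP_MQ_THRESHOLD_DEF */

/* Rough stand-in for the macro-generated lpfc_fcp_mq_threshold_init(). */
static int threshold_init(int *cfg, int val)
{
	if (val >= THRESHOLD_MIN && val <= THRESHOLD_MAX) {
		*cfg = val;             /* accept the administrator's value */
		return 0;
	}
	*cfg = THRESHOLD_DEF;           /* out of range: fall back to 8 */
	return -1;
}

int main(void)
{
	int cfg;

	threshold_init(&cfg, 4);        /* in range  -> cfg == 4 */
	printf("cfg = %d\n", cfg);
	threshold_init(&cfg, 999);      /* rejected  -> cfg == 8 */
	printf("cfg = %d\n", cfg);
	return 0;
}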
@@ -4309,10 +4309,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
shost->max_cmd_len = 16;
if (phba->sli_rev == LPFC_SLI_REV4) {
if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
shost->nr_hw_queues = phba->cfg_hdw_queue;
else
shost->nr_hw_queues = phba->sli4_hba.num_present_cpu;
if (!phba->cfg_fcp_mq_threshold ||
phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(),
phba->cfg_fcp_mq_threshold);
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
......
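The effect of the lpfc_create_port() hunk above is easiest to see with concrete numbers. The sketch below is a standalone illustration, not driver code; the node count, queue count, and threshold are example assumptions (a 2-node, 128-CPU host with the default threshold of 8).

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int num_possible_nodes = 2;        /* example NUMA node count */
	int cfg_hdw_queue = 128;           /* example: one HW queue per CPU */
	int cfg_fcp_mq_threshold = 8;      /* LPFC_FCP_MQ_THRESHOLD_DEF */

	/* 0 or an over-large threshold falls back to the HW queue count. */
	if (!cfg_fcp_mq_threshold || cfg_fcp_mq_threshold > cfg_hdw_queue)
		cfg_fcp_mq_threshold = cfg_hdw_queue;

	int nr_hw_queues = MIN(2 * num_possible_nodes, cfg_fcp_mq_threshold);

	/* Prints 4: the SCSI layer pre-allocates for 4 queues instead of
	 * the 128 (num_present_cpu) it would have used before this change. */
	printf("nr_hw_queues = %d\n", nr_hw_queues);
	return 0;
}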
@@ -44,6 +44,11 @@
#define LPFC_HBA_HDWQ_MAX 128
#define LPFC_HBA_HDWQ_DEF 0
/* FCP MQ queue count limiting */
#define LPFC_FCP_MQ_THRESHOLD_MIN 0
#define LPFC_FCP_MQ_THRESHOLD_MAX 128
#define LPFC_FCP_MQ_THRESHOLD_DEF 8
/* Common buffer size to accommodate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768
......
@@ -2956,6 +2956,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
vha->gnl.l = NULL;
vfree(vha->scan.l);
if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
......
@@ -3440,6 +3440,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
probe_failed:
if (base_vha->gnl.l) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
}
if (base_vha->timer_active)
qla2x00_stop_timer(base_vha);
base_vha->flags.online = 0;
@@ -3673,7 +3679,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
if (!atomic_read(&pdev->enable_cnt)) {
dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size,
base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
scsi_host_put(base_vha->host);
kfree(ha);
pci_set_drvdata(pdev, NULL);
@@ -3713,6 +3719,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
dma_free_coherent(&ha->pdev->dev,
base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);
base_vha->gnl.l = NULL;
vfree(base_vha->scan.l);
if (IS_QLAFX00(ha))
@@ -4816,6 +4824,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
"Alloc failed for scan database.\n");
dma_free_coherent(&ha->pdev->dev, vha->gnl.size,
vha->gnl.l, vha->gnl.ldma);
vha->gnl.l = NULL;
scsi_remove_host(vha->host);
return NULL;
}
......
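All of the qla2xxx hunks apply the same free-and-clear idiom: dma_free_coherent() on gnl.l is followed immediately by gnl.l = NULL, and the probe_failed: error path tests the pointer before freeing, so whichever teardown path runs last sees NULL and skips a second free. A standalone illustration of the idiom follows (plain malloc/free stand in for the DMA-coherent API; the structure is simplified).

#include <stdio.h>
#include <stdlib.h>

struct gnl_buf { void *l; size_t size; };

/* Free the buffer at most once; later teardown paths become no-ops. */
static void gnl_free(struct gnl_buf *gnl)
{
	if (gnl->l) {
		free(gnl->l);
		gnl->l = NULL;          /* the next caller sees NULL and skips */
	}
}

int main(void)
{
	struct gnl_buf gnl = { .l = malloc(64), .size = 64 };

	gnl_free(&gnl);                 /* e.g. the probe_failed: error path */
	gnl_free(&gnl);                 /* e.g. remove_one() later: harmless */
	printf("gnl.l = %p\n", gnl.l);  /* (nil) */
	return 0;
}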
@@ -7062,6 +7062,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
if (!vreg)
return 0;
return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
......
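The UFS hunk guards against an optional regulator that was never provisioned: when a board does not declare a given supply, the corresponding struct ufs_vreg pointer stays NULL, and the resume-time (hpm) path dereferenced it via vreg->max_uA. A standalone illustration, not the driver code, with the struct and output simplified:

#include <stdio.h>
#include <stddef.h>

struct ufs_vreg { int max_uA; };

static int config_vreg_hpm(struct ufs_vreg *vreg)
{
	if (!vreg)              /* optional supply absent: nothing to do */
		return 0;
	printf("set regulator load to %d uA\n", vreg->max_uA);
	return 0;
}

int main(void)
{
	struct ufs_vreg vccq2 = { .max_uA = 200000 };

	config_vreg_hpm(NULL);  /* previously a NULL pointer dereference */
	config_vreg_hpm(&vccq2);
	return 0;
}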
@@ -1132,14 +1132,16 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
bool read_len_valid = false;
uint32_t read_len = se_cmd->data_length;
uint32_t read_len;
/*
* cmd has been completed already from timeout, just reclaim
* data area space and free cmd
*/
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON_ONCE(se_cmd);
goto out;
}
list_del_init(&cmd->queue_entry);
@@ -1152,6 +1154,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
goto done;
}
read_len = se_cmd->data_length;
if (se_cmd->data_direction == DMA_FROM_DEVICE &&
(entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
read_len_valid = true;
@@ -1307,6 +1310,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
list_del_init(&cmd->queue_entry);
cmd->se_cmd = NULL;
} else {
list_del_init(&cmd->queue_entry);
idr_remove(&udev->commands, id);
@@ -2022,6 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
WARN_ON(!cmd->se_cmd);
list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
......
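The tcmu hunks close a use-after-free window: when a command times out, target core completes and may free the se_cmd, so the timeout path now clears cmd->se_cmd and the late completion path tests the EXPIRED flag before touching se_cmd at all (which is why read_len is only read after that check). A standalone illustration of the ordering, not driver code, with locking, the ring, and the data area omitted:

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct se_cmd { unsigned int data_length; };
struct tcmu_cmd { struct se_cmd *se_cmd; bool expired; };

/* Timeout path: the command is completed to the target core, which may
 * free it, so mark the command expired and drop our reference. */
static void timeout_path(struct tcmu_cmd *cmd)
{
	cmd->expired = true;
	free(cmd->se_cmd);
	cmd->se_cmd = NULL;
}

/* Late completion from the userspace ring: only reclaim resources if the
 * command already expired; dereference se_cmd only on the live path. */
static void completion_path(struct tcmu_cmd *cmd)
{
	if (cmd->expired) {
		printf("expired: reclaim only, se_cmd not touched\n");
		return;
	}
	printf("read_len = %u\n", cmd->se_cmd->data_length);
}

int main(void)
{
	struct tcmu_cmd cmd = { .se_cmd = malloc(sizeof(struct se_cmd)), .expired = false };

	cmd.se_cmd->data_length = 512;
	timeout_path(&cmd);     /* the command times out first */
	completion_path(&cmd);  /* arrives later; no longer a use-after-free */
	return 0;
}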