Commit 8959d300 authored by Robert Hancock, committed by Jeff Garzik

sata_nv: fix ATAPI issues with memory over 4GB (v7)

This fixes some problems with ATAPI devices on nForce4 controllers in ADMA mode
on systems with memory located above 4GB. We need to delay setting the 64-bit
DMA mask until the PRD table and padding buffer are allocated so that they don't
get allocated above 4GB and break legacy mode (which is needed for ATAPI
devices). Also, if either port is in ATAPI mode we need to set the DMA mask
for the PCI device to 32-bit to ensure that the IOMMU code properly bounces
requests above 4GB, as it appears setting the bounce limit does not guarantee
that we will not try to map requests above this point.
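
Background: legacy-mode (non-ADMA) transfers describe their buffers with
PRD entries whose address field is only 32 bits wide, which is why the PRD
table and padding buffer themselves must live below 4GB. The following
standalone sketch is purely illustrative (it is not part of the patch; the
entry layout is modeled on libata's legacy PRD) and shows the silent
truncation a 64-bit allocation would suffer:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Modeled on a legacy PRD entry: the address field is 32 bits,
       so the controller can only reach the low 4GB of memory. */
    struct prd_entry {
            uint32_t addr;      /* bus address of the data buffer */
            uint32_t flags_len; /* byte count and end-of-table flag */
    };

    int main(void)
    {
            /* A buffer that landed above 4GB, as can happen once a
               64-bit DMA mask is in effect. */
            uint64_t bus_addr = 0x100001000ULL;
            struct prd_entry prd = { (uint32_t)bus_addr, 512 };

            printf("intended bus address: 0x%" PRIx64 "\n", bus_addr);
            printf("address in PRD entry: 0x%" PRIx32 "\n", prd.addr);
            /* Prints 0x1000: the upper bits are lost and the device
               would DMA to the wrong place -- hence a 32-bit mask
               must be in force while these buffers are allocated. */
            return 0;
    }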

Reported to fix https://bugzilla.redhat.com/show_bug.cgi?id=351451

Signed-off-by: Robert Hancock <hancockr@shaw.ca>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Parent 8d8b6004
@@ -247,6 +247,7 @@ struct nv_adma_port_priv {
 	void __iomem *ctl_block;
 	void __iomem *gen_block;
 	void __iomem *notifier_clear_block;
+	u64 adma_dma_mask;
 	u8 flags;
 	int last_issue_ncq;
 };
@@ -715,9 +716,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 {
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct nv_adma_port_priv *pp = ap->private_data;
+	struct nv_adma_port_priv *port0, *port1;
+	struct scsi_device *sdev0, *sdev1;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-	u64 bounce_limit;
-	unsigned long segment_boundary;
+	unsigned long segment_boundary, flags;
 	unsigned short sg_tablesize;
 	int rc;
 	int adma_enable;
@@ -729,6 +731,8 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		/* Not a proper libata device, ignore */
 		return rc;
 
+	spin_lock_irqsave(ap->lock, flags);
+
 	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
 		/*
 		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
@@ -737,7 +741,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		 * Restrict DMA parameters as required by the legacy interface
 		 * when an ATAPI device is connected.
 		 */
-		bounce_limit = ATA_DMA_MASK;
 		segment_boundary = ATA_DMA_BOUNDARY;
 		/* Subtract 1 since an extra entry may be needed for padding, see
 		   libata-scsi.c */
@@ -748,7 +751,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		adma_enable = 0;
 		nv_adma_register_mode(ap);
 	} else {
-		bounce_limit = *ap->dev->dma_mask;
 		segment_boundary = NV_ADMA_DMA_BOUNDARY;
 		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
 		adma_enable = 1;
@@ -774,12 +776,49 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	if (current_reg != new_reg)
 		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
 
-	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
+	port0 = ap->host->ports[0]->private_data;
+	port1 = ap->host->ports[1]->private_data;
+	sdev0 = ap->host->ports[0]->link.device[0].sdev;
+	sdev1 = ap->host->ports[1]->link.device[0].sdev;
+	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
+	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
+		/** We have to set the DMA mask to 32-bit if either port is in
+		    ATAPI mode, since they are on the same PCI device which is
+		    used for DMA mapping. If we set the mask we also need to set
+		    the bounce limit on both ports to ensure that the block
+		    layer doesn't feed addresses that cause DMA mapping to
+		    choke. If either SCSI device is not allocated yet, it's OK
+		    since that port will discover its correct setting when it
+		    does get allocated.
+		    Note: Setting 32-bit mask should not fail. */
+		if (sdev0)
+			blk_queue_bounce_limit(sdev0->request_queue,
+					       ATA_DMA_MASK);
+		if (sdev1)
+			blk_queue_bounce_limit(sdev1->request_queue,
+					       ATA_DMA_MASK);
+
+		pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	} else {
+		/** This shouldn't fail as it was set to this value before */
+		pci_set_dma_mask(pdev, pp->adma_dma_mask);
+		if (sdev0)
+			blk_queue_bounce_limit(sdev0->request_queue,
+					       pp->adma_dma_mask);
+		if (sdev1)
+			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
+	}
+
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
 	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
-		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
-		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
+		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+		(unsigned long long)*ap->host->dev->dma_mask,
+		segment_boundary, sg_tablesize);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+
 	return rc;
 }
@@ -1140,10 +1179,20 @@ static int nv_adma_port_start(struct ata_port *ap)
 	void *mem;
 	dma_addr_t mem_dma;
 	void __iomem *mmio;
+	struct pci_dev *pdev = to_pci_dev(dev);
 	u16 tmp;
 
 	VPRINTK("ENTER\n");
 
+	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+	   pad buffers */
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (rc)
+		return rc;
+
 	rc = ata_port_start(ap);
 	if (rc)
 		return rc;
@@ -1159,6 +1208,15 @@ static int nv_adma_port_start(struct ata_port *ap)
 	pp->notifier_clear_block = pp->gen_block +
 		NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
 
+	/* Now that the legacy PRD and padding buffer are allocated we can
+	   safely raise the DMA mask to allocate the CPB/APRD table.
+	   These are allowed to fail since we store the value that ends up
+	   being used to set as the bounce limit in slave_config later if
+	   needed. */
+	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	pp->adma_dma_mask = *dev->dma_mask;
+
 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
 				  &mem_dma, GFP_KERNEL);
 	if (!mem)
@@ -2417,12 +2475,6 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	hpriv->type = type;
 	host->private_data = hpriv;
 
-	/* set 64bit dma masks, may fail */
-	if (type == ADMA) {
-		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0)
-			pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-	}
-
 	/* request and iomap NV_MMIO_BAR */
 	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
 	if (rc)
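
For comparison, on current kernels the same "start at 32-bit, then
opportunistically raise to 64-bit" ordering is usually written with
dma_set_mask_and_coherent(). A hypothetical probe-path sketch (not from
this patch; the function and message below are made up for illustration):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int example_setup_dma(struct pci_dev *pdev)
    {
            struct device *dev = &pdev->dev;
            int rc;

            /* Start at 32 bits so the legacy-mode structures allocated
               next are guaranteed to land below 4GB. */
            rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            if (rc)
                    return rc;

            /* ... allocate the PRD table and padding buffer here ... */

            /* Now it is safe to try to raise the mask; failure is fine
               because the 32-bit setting simply stays in effect. */
            if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                    dev_info(dev, "falling back to 32-bit DMA\n");

            return 0;
    }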