Commit 63d12cc3 authored by Linus Torvalds

Merge tag 'dma-mapping-5.18-1' of git://git.infradead.org/users/hch/dma-mapping

Pull more dma-mapping updates from Christoph Hellwig:

 - fix a regression in dma remap handling vs AMD memory encryption (me)

 - finally kill off the legacy PCI DMA API (Christophe JAILLET)

* tag 'dma-mapping-5.18-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: move pgprot_decrypted out of dma_pgprot
  PCI/doc: cleanup references to the legacy PCI DMA API
  PCI: Remove the deprecated "pci-dma-compat.h" API
Documentation/PCI/pci.rst:
@@ -278,20 +278,20 @@ appropriate parameters. In general this allows more efficient DMA
 on systems where System RAM exists above 4G _physical_ address.
 
 Drivers for all PCI-X and PCIe compliant devices must call
-pci_set_dma_mask() as they are 64-bit DMA devices.
+dma_set_mask() as they are 64-bit DMA devices.
 
 Similarly, drivers must also "register" this capability if the device
-can directly address "consistent memory" in System RAM above 4G physical
-address by calling pci_set_consistent_dma_mask().
+can directly address "coherent memory" in System RAM above 4G physical
+address by calling dma_set_coherent_mask().
 Again, this includes drivers for all PCI-X and PCIe compliant devices.
 
 Many 64-bit "PCI" devices (before PCI-X) and some PCI-X devices are
 64-bit DMA capable for payload ("streaming") data but not control
-("consistent") data.
+("coherent") data.
 
 Setup shared control data
 -------------------------
-Once the DMA masks are set, the driver can allocate "consistent" (a.k.a. shared)
+Once the DMA masks are set, the driver can allocate "coherent" (a.k.a. shared)
 memory. See Documentation/core-api/dma-api.rst for a full description of
 the DMA APIs. This section is just a reminder that it needs to be done
 before enabling DMA on the device.
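As a rough illustration of the updated wording, a probe path using the generic DMA API could look like the sketch below. This is not part of the patch: the foo_* names, the descriptor layout and the choice of a 64-bit mask are assumptions made purely for illustration.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct foo_desc { u64 addr; u32 len; u32 flags; };	/* hypothetical layout */

struct foo_priv {
	struct foo_desc *desc;		/* "coherent" (shared) control data */
	dma_addr_t desc_dma;
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct foo_priv *priv;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/* 64-bit capable device: the generic calls that replace
	 * pci_set_dma_mask() and pci_set_consistent_dma_mask() */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		return err;
	err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		return err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* allocate the shared ("coherent") control memory before enabling DMA */
	priv->desc = dma_alloc_coherent(&pdev->dev, sizeof(*priv->desc),
					&priv->desc_dma, GFP_KERNEL);
	if (!priv->desc)
		return -ENOMEM;

	pci_set_drvdata(pdev, priv);
	return 0;
}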
@@ -367,7 +367,7 @@ steps need to be performed:
 - Disable the device from generating IRQs
 - Release the IRQ (free_irq())
 - Stop all DMA activity
-- Release DMA buffers (both streaming and consistent)
+- Release DMA buffers (both streaming and coherent)
 - Unregister from other subsystems (e.g. scsi or netdev)
 - Disable device from responding to MMIO/IO Port addresses
 - Release MMIO/IO Port resource(s)
@@ -420,7 +420,7 @@ Once DMA is stopped, clean up streaming DMA first.
 I.e. unmap data buffers and return buffers to "upstream"
 owners if there is one.
 
-Then clean up "consistent" buffers which contain the control data.
+Then clean up "coherent" buffers which contain the control data.
 
 See Documentation/core-api/dma-api.rst for details on unmapping interfaces.
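Continuing the hypothetical foo driver from the earlier sketch, a teardown path that follows the order above (streaming DMA unmapped before the "coherent" control data is freed) might look like this; the foo_hw_* helpers and the rx buffer fields are assumptions, not from the patch:

static void foo_remove(struct pci_dev *pdev)
{
	struct foo_priv *priv = pci_get_drvdata(pdev);

	foo_hw_disable_irqs(priv);			/* stop the device generating IRQs */
	free_irq(pci_irq_vector(pdev, 0), priv);	/* release the IRQ */
	foo_hw_stop_dma(priv);				/* stop all DMA activity */

	/* unmap streaming buffers first ... */
	dma_unmap_single(&pdev->dev, priv->rx_dma, priv->rx_len, DMA_FROM_DEVICE);

	/* ... then free the "coherent" buffers holding the control data */
	dma_free_coherent(&pdev->dev, sizeof(*priv->desc), priv->desc, priv->desc_dma);
}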
include/linux/pci-dma-compat.h (deleted by this merge):
/* SPDX-License-Identifier: GPL-2.0 */
/* include this file if the platform implements the dma_ DMA Mapping API
 * and wants to provide the pci_ DMA Mapping API in terms of it */

#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
#define _ASM_GENERIC_PCI_DMA_COMPAT_H

#include <linux/dma-mapping.h>

/* This defines the direction arg to the DMA mapping routines. */
#define PCI_DMA_BIDIRECTIONAL	DMA_BIDIRECTIONAL
#define PCI_DMA_TODEVICE	DMA_TO_DEVICE
#define PCI_DMA_FROMDEVICE	DMA_FROM_DEVICE
#define PCI_DMA_NONE		DMA_NONE

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
		      dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
		    void *vaddr, dma_addr_t dma_handle)
{
	dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle);
}

static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
	return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}

static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		 size_t size, int direction)
{
	dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}

static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
	     unsigned long offset, size_t size, int direction)
{
	return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
}

static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
	       size_t size, int direction)
{
	dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
}

static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}

static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
	     int nents, int direction)
{
	dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
			    size_t size, int direction)
{
	dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
			       size_t size, int direction)
{
	dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
			int nelems, int direction)
{
	dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}

static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
			   int nelems, int direction)
{
	dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}

static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
	return dma_mapping_error(&pdev->dev, dma_addr);
}

#ifdef CONFIG_PCI
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_mask(&dev->dev, mask);
}

static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
	return dma_set_coherent_mask(&dev->dev, mask);
}
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
#endif

#endif
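For any driver that still used these wrappers, the conversion is mechanical: each pci_* call becomes the corresponding dma_* call on &pdev->dev, and the PCI_DMA_* direction constants become enum dma_data_direction values. A small before/after sketch (the skb and len variables are illustrative, not from the patch):

	dma_addr_t addr;

	/* before: legacy wrapper removed by this merge */
	addr = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;

	/* after: generic DMA API */
	addr = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;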
include/linux/pci.h:
@@ -2473,8 +2473,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
 void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
 #endif
 
-/* Provide the legacy pci_dma_* API */
-#include <linux/pci-dma-compat.h>
+#include <linux/dma-mapping.h>
 
 #define pci_printk(level, pdev, fmt, arg...) \
 	dev_printk(level, &(pdev)->dev, fmt, ##arg)
kernel/dma/direct.c:
@@ -277,12 +277,16 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 	}
 
 	if (remap) {
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
+
+		if (force_dma_unencrypted(dev))
+			prot = pgprot_decrypted(prot);
+
 		/* remove any dirty cache lines on the kernel alias */
 		arch_dma_prep_coherent(page, size);
 
 		/* create a coherent mapping */
-		ret = dma_common_contiguous_remap(page, size,
-				dma_pgprot(dev, PAGE_KERNEL, attrs),
+		ret = dma_common_contiguous_remap(page, size, prot,
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
@@ -535,6 +539,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+	if (force_dma_unencrypted(dev))
+		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
kernel/dma/mapping.c:
@@ -407,8 +407,6 @@ EXPORT_SYMBOL(dma_get_sgtable_attrs);
  */
 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
 {
-	if (force_dma_unencrypted(dev))
-		prot = pgprot_decrypted(prot);
 	if (dev_is_dma_coherent(dev))
 		return prot;
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
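Taken together, the three hunks above change the calling convention: dma_pgprot() no longer strips the memory-encryption bit itself, and the callers that actually create an unencrypted mapping apply pgprot_decrypted() after it. This is the fix for the dma remap regression on AMD memory encryption mentioned in the merge summary. In sketch form, the pattern now used by dma_direct_alloc() and dma_direct_mmap() is:

	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);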