Commit b6197b93 authored by Suthikulpanit, Suravee; committed by Rafael J. Wysocki

arm64 : Introduce support for ACPI _CCA object

ACPI specification section 6.2.17 (_CCA) states that ARM platforms require
the ACPI _CCA object to be specified for DMA-capable devices. Therefore,
this patch selects ACPI_CCA_REQUIRED in the arm64 Kconfig.

In addition, to handle the case where _CCA is missing, arm64 assigns
dummy_dma_ops to the device to disable its DMA capability.
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Mark Salter <msalter@redhat.com>
Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Parent d0562674
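
For context, the ACPI _CCA value encodes whether a device's DMA is cache coherent: 1 means coherent, 0 means not coherent, and with this patch a missing _CCA on arm64 means the device gets no usable DMA at all. The sketch below is illustrative only and not part of the patch; the helper name and the "absent" encoding are made up.

#include <linux/types.h>

/*
 * Illustrative sketch, not from this patch: the decision table that the
 * changelog above describes. The helper name and the "anything else means
 * the _CCA object is absent" encoding are hypothetical.
 */
static bool example_cca_allows_dma(int cca, bool *coherent)
{
	switch (cca) {
	case 1:			/* _CCA = 1: DMA is cache coherent */
		*coherent = true;
		return true;
	case 0:			/* _CCA = 0: DMA allowed, needs cache maintenance */
		*coherent = false;
		return true;
	default:		/* _CCA absent: arm64 disables DMA; the device
				 * ends up with dummy_dma_ops */
		return false;
	}
}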
arch/arm64/Kconfig
 config ARM64
 	def_bool y
+	select ACPI_CCA_REQUIRED if ACPI
 	select ACPI_GENERIC_GSI if ACPI
 	select ACPI_REDUCED_HARDWARE_ONLY if ACPI
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
......
arch/arm64/include/asm/dma-mapping.h
@@ -18,6 +18,7 @@
 #ifdef __KERNEL__
 
+#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -28,13 +29,23 @@
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev) || !dev->archdata.dma_ops)
+	if (unlikely(!dev))
 		return dma_ops;
-	else
+	else if (dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
+	else if (acpi_disabled)
+		return dma_ops;
+
+	/*
+	 * When ACPI is enabled, if arch_set_dma_ops is not called,
+	 * we will disable device DMA capability by setting it
+	 * to dummy_dma_ops.
+	 */
+	return &dummy_dma_ops;
 }
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -48,6 +59,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 				      struct iommu_ops *iommu, bool coherent)
 {
+	if (!acpi_disabled && !dev->archdata.dma_ops)
+		dev->archdata.dma_ops = dma_ops;
+
 	dev->archdata.dma_coherent = coherent;
 }
 #define arch_setup_dma_ops	arch_setup_dma_ops
......
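
Taken together, the header changes give __generic_dma_ops() a three-way dispatch: per-device ops installed via arch_setup_dma_ops() are used first, a NULL device or a non-ACPI boot falls back to the global dma_ops, and an ACPI-described device that was never configured ends up on dummy_dma_ops. A rough sketch of how firmware glue would be expected to drive the hook follows; the helper and its arguments are hypothetical, not from this patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical glue, for illustration only: a device that carries _CCA
 * gets per-device ops installed; one that does not is simply left alone,
 * so __generic_dma_ops() resolves it to &dummy_dma_ops.
 */
static void example_configure_device_dma(struct device *dev, bool has_cca,
					 bool coherent)
{
	if (has_cca)
		/* Install the default global ops for this device. */
		arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
	/* else: leave dev->archdata.dma_ops NULL on purpose. */
}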
arch/arm64/mm/dma-mapping.c
@@ -414,6 +414,98 @@ static int __init atomic_pool_init(void)
 	return -ENOMEM;
 }
 
+/********************************************
+ * The following APIs are for dummy DMA ops *
+ ********************************************/
+
+static void *__dummy_alloc(struct device *dev, size_t size,
+			   dma_addr_t *dma_handle, gfp_t flags,
+			   struct dma_attrs *attrs)
+{
+	return NULL;
+}
+
+static void __dummy_free(struct device *dev, size_t size,
+			 void *vaddr, dma_addr_t dma_handle,
+			 struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_mmap(struct device *dev,
+			struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			struct dma_attrs *attrs)
+{
+	return -ENXIO;
+}
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   struct dma_attrs *attrs)
+{
+	return DMA_ERROR_CODE;
+}
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			       size_t size, enum dma_data_direction dir,
+			       struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
+			  int nelems, enum dma_data_direction dir,
+			  struct dma_attrs *attrs)
+{
+	return 0;
+}
+
+static void __dummy_unmap_sg(struct device *dev,
+			     struct scatterlist *sgl, int nelems,
+			     enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
+{
+}
+
+static void __dummy_sync_single(struct device *dev,
+				dma_addr_t dev_addr, size_t size,
+				enum dma_data_direction dir)
+{
+}
+
+static void __dummy_sync_sg(struct device *dev,
+			    struct scatterlist *sgl, int nelems,
+			    enum dma_data_direction dir)
+{
+}
+
+static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+	return 1;
+}
+
+static int __dummy_dma_supported(struct device *hwdev, u64 mask)
+{
+	return 0;
+}
+
+struct dma_map_ops dummy_dma_ops = {
+	.alloc = __dummy_alloc,
+	.free = __dummy_free,
+	.mmap = __dummy_mmap,
+	.map_page = __dummy_map_page,
+	.unmap_page = __dummy_unmap_page,
+	.map_sg = __dummy_map_sg,
+	.unmap_sg = __dummy_unmap_sg,
+	.sync_single_for_cpu = __dummy_sync_single,
+	.sync_single_for_device = __dummy_sync_single,
+	.sync_sg_for_cpu = __dummy_sync_sg,
+	.sync_sg_for_device = __dummy_sync_sg,
+	.mapping_error = __dummy_mapping_error,
+	.dma_supported = __dummy_dma_supported,
+};
+EXPORT_SYMBOL(dummy_dma_ops);
+
 static int __init arm64_dma_init(void)
 {
 	int ret;
......
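
In practice, dummy_dma_ops makes every DMA API entry point fail in the conventional way, so a driver whose device lacks _CCA backs off cleanly rather than performing DMA with unknown coherency. A hypothetical probe fragment (not from the patch) showing what such a driver would observe:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical driver fragment; every call below lands in dummy_dma_ops. */
static int example_probe_dma(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	/* Rejected: __dummy_dma_supported() always returns 0. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/* Would also fail: __dummy_alloc() always returns NULL. */
	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
	return 0;
}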