Commit 944fc36c authored by Rob Clark

drm/msm: use upstream iommu

Downstream kernel IOMMU had a non-standard way of dealing with multiple
devices and multiple ports/contexts.  We don't need that on upstream
kernel, so rip out the crazy.

Note that we have to move the pinning of the ringbuffer to after the
IOMMU is attached.  No idea how that managed to work properly on the
downstream kernel.

For now, I am leaving the IOMMU port name stuff in place, to simplify
things for folks trying to backport latest drm/msm to device kernels.
Once we no longer have to care about pre-DT kernels, we can drop this
and instead backport upstream IOMMU driver.
Signed-off-by: Rob Clark <robdclark@gmail.com>
Parent 1c4997fe
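As a rough illustration of the resulting flow, the sketch below shows the two points the commit message makes: attach/detach now act on the single struct device kept in msm_mmu, and the ringbuffer is only pinned after the GPU's MMU has been attached. This is a minimal sketch under assumptions, not the literal driver code: the example_* functions, the ports/nports parameters, and the elided error handling are placeholders; iommu_attach_device(), msm_gem_get_iova_locked() and the msm_mmu structures are the ones appearing in the diff below.

/*
 * Sketch only: with the upstream IOMMU API there is one device and one
 * domain, so the downstream per-port/context loop goes away.
 */
static int example_attach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return iommu_attach_device(iommu->domain, mmu->dev);
}

/*
 * Sketch only: attach the GPU's MMU first, then pin the ringbuffer, so
 * the mapping lands in the now-attached domain.  (Previously the pin
 * happened in msm_gpu_init(), before the attach.)
 */
static int example_gpu_setup(struct msm_gpu *gpu, const char **ports, int nports)
{
	int ret;

	if (gpu->mmu) {
		ret = gpu->mmu->funcs->attach(gpu->mmu, ports, nports);
		if (ret)
			return ret;
	}

	return msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
}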
@@ -2,7 +2,6 @@
 config DRM_MSM
 	tristate "MSM DRM"
 	depends on DRM
-	depends on MSM_IOMMU
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	select DRM_KMS_HELPER
 	select SHMEM
...
@@ -91,9 +91,17 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 int adreno_hw_init(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
 
 	DBG("%s", gpu->name);
 
+	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+	if (ret) {
+		gpu->rb_iova = 0;
+		dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
+		return ret;
+	}
+
 	/* Setup REG_CP_RB_CNTL: */
 	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
 			/* size is log2(quad-words): */
...
@@ -361,7 +361,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			goto fail;
...
@@ -320,7 +320,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 	mdelay(16);
 
 	if (config->iommu) {
-		mmu = msm_iommu_new(dev, config->iommu);
+		mmu = msm_iommu_new(&pdev->dev, config->iommu);
 		if (IS_ERR(mmu)) {
 			ret = PTR_ERR(mmu);
 			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
...
@@ -606,7 +606,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	iommu = iommu_domain_alloc(&platform_bus_type);
 	if (iommu) {
 		dev_info(drm->dev, "%s: using IOMMU\n", name);
-		gpu->mmu = msm_iommu_new(drm, iommu);
+		gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
 	} else {
 		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
 	}
@@ -621,13 +621,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		goto fail;
 	}
 
-	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
-	if (ret) {
-		gpu->rb_iova = 0;
-		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
-		goto fail;
-	}
-
 	bs_init(gpu);
 
 	return 0;
...
@@ -33,39 +33,14 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
 {
-	struct drm_device *dev = mmu->dev;
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i, ret;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx)) {
-			dev_warn(dev->dev, "couldn't get %s context", names[i]);
-			continue;
-		}
-		ret = iommu_attach_device(iommu->domain, ctx);
-		if (ret) {
-			dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
-			return ret;
-		}
-	}
-
-	return 0;
+	return iommu_attach_device(iommu->domain, mmu->dev);
 }
 
 static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
-	int i;
-
-	for (i = 0; i < cnt; i++) {
-		struct device *msm_iommu_get_ctx(const char *ctx_name);
-		struct device *ctx = msm_iommu_get_ctx(names[i]);
-		if (IS_ERR_OR_NULL(ctx))
-			continue;
-		iommu_detach_device(iommu->domain, ctx);
-	}
+	iommu_detach_device(iommu->domain, mmu->dev);
 }
 
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -149,7 +124,7 @@ static const struct msm_mmu_funcs funcs = {
 		.destroy = msm_iommu_destroy,
 };
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain)
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
 {
 	struct msm_iommu *iommu;
...
@@ -32,17 +32,17 @@ struct msm_mmu_funcs {
 struct msm_mmu {
 	const struct msm_mmu_funcs *funcs;
-	struct drm_device *dev;
+	struct device *dev;
 };
 
-static inline void msm_mmu_init(struct msm_mmu *mmu, struct drm_device *dev,
+static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
 		const struct msm_mmu_funcs *funcs)
 {
 	mmu->dev = dev;
 	mmu->funcs = funcs;
 }
 
-struct msm_mmu *msm_iommu_new(struct drm_device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct drm_device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
 
 #endif /* __MSM_MMU_H__ */