提交 8bdcd949 编写于 作者: Rob Clark

drm/msm: pass address-space to _get_iova() and friends

No functional change, that will come later.  But this will make it
easier to deal with dynamically created address spaces (ie. per-
process pagetables for gpu).
Signed-off-by: Rob Clark <robdclark@gmail.com>
上级 f59f62d5
...@@ -308,7 +308,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu, ...@@ -308,7 +308,7 @@ static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
} }
if (iova) { if (iova) {
int ret = msm_gem_get_iova_locked(bo, gpu->id, iova); int ret = msm_gem_get_iova_locked(bo, gpu->aspace, iova);
if (ret) { if (ret) {
drm_gem_object_unreference(bo); drm_gem_object_unreference(bo);
...@@ -696,19 +696,19 @@ static void a5xx_destroy(struct msm_gpu *gpu) ...@@ -696,19 +696,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) { if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova) if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->id); msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo); drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
} }
if (a5xx_gpu->pfp_bo) { if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova) if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->id); msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo); drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
} }
if (a5xx_gpu->gpmu_bo) { if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova) if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo); drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
} }
......
...@@ -298,7 +298,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) ...@@ -298,7 +298,8 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->gpmu_bo)) if (IS_ERR(a5xx_gpu->gpmu_bo))
goto err; goto err;
if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->id, &a5xx_gpu->gpmu_iova)) if (msm_gem_get_iova_locked(a5xx_gpu->gpmu_bo, gpu->aspace,
&a5xx_gpu->gpmu_iova))
goto err; goto err;
ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo); ptr = msm_gem_get_vaddr_locked(a5xx_gpu->gpmu_bo);
...@@ -327,7 +328,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu) ...@@ -327,7 +328,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
err: err:
if (a5xx_gpu->gpmu_iova) if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id); msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
if (a5xx_gpu->gpmu_bo) if (a5xx_gpu->gpmu_bo)
drm_gem_object_unreference(a5xx_gpu->gpmu_bo); drm_gem_object_unreference(a5xx_gpu->gpmu_bo);
......
...@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu) ...@@ -64,7 +64,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
DBG("%s", gpu->name); DBG("%s", gpu->name);
ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova); ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->aspace, &gpu->rb_iova);
if (ret) { if (ret) {
gpu->rb_iova = 0; gpu->rb_iova = 0;
dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret); dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
...@@ -414,7 +414,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, ...@@ -414,7 +414,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM; return -ENOMEM;
} }
ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id, ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova); &adreno_gpu->memptrs_iova);
if (ret) { if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret); dev_err(drm->dev, "could not map memptrs: %d\n", ret);
...@@ -433,7 +433,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) ...@@ -433,7 +433,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
msm_gem_put_vaddr(adreno_gpu->memptrs_bo); msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
if (adreno_gpu->memptrs_iova) if (adreno_gpu->memptrs_iova)
msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id); msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->aspace);
drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo); drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
} }
......
...@@ -994,7 +994,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size) ...@@ -994,7 +994,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
} }
ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj,
priv->kms->id, &iova); priv->kms->aspace, &iova);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
if (ret) { if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret); pr_err("%s: failed to get iova, %d\n", __func__, ret);
...@@ -1152,7 +1152,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len) ...@@ -1152,7 +1152,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) { if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
ret = msm_gem_get_iova(msm_host->tx_gem_obj, ret = msm_gem_get_iova(msm_host->tx_gem_obj,
priv->kms->id, &dma_base); priv->kms->aspace, &dma_base);
if (ret) { if (ret) {
pr_err("%s: failed to get iova: %d\n", __func__, ret); pr_err("%s: failed to get iova: %d\n", __func__, ret);
return ret; return ret;
......
...@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) ...@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base); struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base; struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_put_iova(val, kms->id); msm_gem_put_iova(val, kms->aspace);
drm_gem_object_unreference_unlocked(val); drm_gem_object_unreference_unlocked(val);
} }
...@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc) ...@@ -374,7 +374,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) { if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */ /* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo); drm_gem_object_reference(next_bo);
msm_gem_get_iova_locked(next_bo, kms->id, &iova); msm_gem_get_iova_locked(next_bo, kms->aspace, &iova);
/* enable cursor: */ /* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma), mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
...@@ -432,7 +432,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -432,7 +432,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
} }
if (cursor_bo) { if (cursor_bo) {
ret = msm_gem_get_iova(cursor_bo, kms->id, &iova); ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
if (ret) if (ret)
goto fail; goto fail;
} else { } else {
......
...@@ -163,7 +163,7 @@ static void mdp4_destroy(struct msm_kms *kms) ...@@ -163,7 +163,7 @@ static void mdp4_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = kms->aspace; struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova) if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->id); msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) { if (aspace) {
...@@ -545,7 +545,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) ...@@ -545,7 +545,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail; goto fail;
} }
ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->id, ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova); &mdp4_kms->blank_cursor_iova);
if (ret) { if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
......
...@@ -110,7 +110,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane, ...@@ -110,7 +110,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0; return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id); DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, kms->id); return msm_framebuffer_prepare(fb, kms->aspace);
} }
static void mdp4_plane_cleanup_fb(struct drm_plane *plane, static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
...@@ -125,7 +125,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane, ...@@ -125,7 +125,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return; return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id); DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, kms->id); msm_framebuffer_cleanup(fb, kms->aspace);
} }
...@@ -175,13 +175,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane, ...@@ -175,13 +175,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
msm_framebuffer_iova(fb, kms->id, 0)); msm_framebuffer_iova(fb, kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe), mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
msm_framebuffer_iova(fb, kms->id, 1)); msm_framebuffer_iova(fb, kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe), mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
msm_framebuffer_iova(fb, kms->id, 2)); msm_framebuffer_iova(fb, kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe), mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
msm_framebuffer_iova(fb, kms->id, 3)); msm_framebuffer_iova(fb, kms->aspace, 3));
plane->fb = fb; plane->fb = fb;
} }
......
...@@ -162,7 +162,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val) ...@@ -162,7 +162,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base); struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base; struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_put_iova(val, kms->id); msm_gem_put_iova(val, kms->aspace);
drm_gem_object_unreference_unlocked(val); drm_gem_object_unreference_unlocked(val);
} }
...@@ -760,7 +760,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -760,7 +760,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo) if (!cursor_bo)
return -ENOENT; return -ENOENT;
ret = msm_gem_get_iova(cursor_bo, kms->id, &cursor_addr); ret = msm_gem_get_iova(cursor_bo, kms->aspace, &cursor_addr);
if (ret) if (ret)
return -EINVAL; return -EINVAL;
......
...@@ -279,7 +279,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane, ...@@ -279,7 +279,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0; return 0;
DBG("%s: prepare: FB[%u]", plane->name, fb->base.id); DBG("%s: prepare: FB[%u]", plane->name, fb->base.id);
return msm_framebuffer_prepare(fb, kms->id); return msm_framebuffer_prepare(fb, kms->aspace);
} }
static void mdp5_plane_cleanup_fb(struct drm_plane *plane, static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
...@@ -293,7 +293,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane, ...@@ -293,7 +293,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return; return;
DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id); DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
msm_framebuffer_cleanup(fb, kms->id); msm_framebuffer_cleanup(fb, kms->aspace);
} }
#define FRAC_16_16(mult, div) (((mult) << 16) / (div)) #define FRAC_16_16(mult, div) (((mult) << 16) / (div))
...@@ -511,13 +511,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms, ...@@ -511,13 +511,13 @@ static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
msm_framebuffer_iova(fb, kms->id, 0)); msm_framebuffer_iova(fb, kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
msm_framebuffer_iova(fb, kms->id, 1)); msm_framebuffer_iova(fb, kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, kms->id, 2)); msm_framebuffer_iova(fb, kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, kms->id, 3)); msm_framebuffer_iova(fb, kms->aspace, 3));
} }
/* Note: mdp5_plane->pipe_lock must be locked */ /* Note: mdp5_plane->pipe_lock must be locked */
......
...@@ -51,6 +51,7 @@ static const struct drm_mode_config_funcs mode_config_funcs = { ...@@ -51,6 +51,7 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_state_free = msm_atomic_state_free, .atomic_state_free = msm_atomic_state_free,
}; };
#include "msm_gem.h" /* temporary */
int msm_register_address_space(struct drm_device *dev, int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace) struct msm_gem_address_space *aspace)
{ {
...@@ -61,7 +62,9 @@ int msm_register_address_space(struct drm_device *dev, ...@@ -61,7 +62,9 @@ int msm_register_address_space(struct drm_device *dev,
priv->aspace[priv->num_aspaces] = aspace; priv->aspace[priv->num_aspaces] = aspace;
return priv->num_aspaces++; aspace->id = priv->num_aspaces++;
return aspace->id;
} }
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING #ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
...@@ -707,7 +710,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev, ...@@ -707,7 +710,7 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu) if (!priv->gpu)
return -EINVAL; return -EINVAL;
return msm_gem_get_iova(obj, priv->gpu->id, iova); return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
} }
static int msm_ioctl_gem_info(struct drm_device *dev, void *data, static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
......
...@@ -209,13 +209,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj, ...@@ -209,13 +209,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_fault *vmf); int msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, int msm_gem_get_iova_locked(struct drm_gem_object *obj,
uint64_t *iova); struct msm_gem_address_space *aspace, uint64_t *iova);
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova); int msm_gem_get_iova(struct drm_gem_object *obj,
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id); struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj); struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj); void msm_gem_put_pages(struct drm_gem_object *obj);
void msm_gem_put_iova(struct drm_gem_object *obj, int id); void msm_gem_put_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args); struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
...@@ -251,9 +254,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, ...@@ -251,9 +254,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt); struct dma_buf *dmabuf, struct sg_table *sgt);
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id); int msm_framebuffer_prepare(struct drm_framebuffer *fb,
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id); struct msm_gem_address_space *aspace);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane); void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
......
...@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) ...@@ -84,14 +84,15 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs * should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's. * this, the gpu doesn't care about fb's.
*/ */
int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{ {
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = fb->format->num_planes; int ret, i, n = fb->format->num_planes;
uint64_t iova; uint64_t iova;
for (i = 0; i < n; i++) { for (i = 0; i < n; i++) {
ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret); DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret) if (ret)
return ret; return ret;
...@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) ...@@ -100,21 +101,23 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0; return 0;
} }
void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id) void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace)
{ {
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = fb->format->num_planes; int i, n = fb->format->num_planes;
for (i = 0; i < n; i++) for (i = 0; i < n; i++)
msm_gem_put_iova(msm_fb->planes[i], id); msm_gem_put_iova(msm_fb->planes[i], aspace);
} }
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane) uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace, int plane)
{ {
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane]) if (!msm_fb->planes[plane])
return 0; return 0;
return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane]; return msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
} }
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
......
...@@ -126,7 +126,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, ...@@ -126,7 +126,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the * in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now: * buffer now:
*/ */
ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->id, &paddr); ret = msm_gem_get_iova_locked(fbdev->bo, priv->kms->aspace, &paddr);
if (ret) { if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock; goto fail_unlock;
......
...@@ -308,10 +308,11 @@ put_iova(struct drm_gem_object *obj) ...@@ -308,10 +308,11 @@ put_iova(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning * That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t. * the refcnt counter needs to be atomic_t.
*/ */
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, int msm_gem_get_iova_locked(struct drm_gem_object *obj,
uint64_t *iova) struct msm_gem_address_space *aspace, uint64_t *iova)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id = aspace ? aspace->id : 0;
int ret = 0; int ret = 0;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
...@@ -338,9 +339,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, ...@@ -338,9 +339,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
} }
/* get iova, taking a reference. Should have a matching put */ /* get iova, taking a reference. Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id = aspace ? aspace->id : 0;
int ret; int ret;
/* this is safe right now because we don't unmap until the /* this is safe right now because we don't unmap until the
...@@ -353,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) ...@@ -353,7 +356,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
} }
mutex_lock(&obj->dev->struct_mutex); mutex_lock(&obj->dev->struct_mutex);
ret = msm_gem_get_iova_locked(obj, id, iova); ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex); mutex_unlock(&obj->dev->struct_mutex);
return ret; return ret;
} }
...@@ -361,14 +364,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova) ...@@ -361,14 +364,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
/* get iova without taking a reference, used in places where you have /* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'. * already done a 'msm_gem_get_iova()'.
*/ */
uint64_t msm_gem_iova(struct drm_gem_object *obj, int id) uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{ {
struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_object *msm_obj = to_msm_bo(obj);
int id = aspace ? aspace->id : 0;
WARN_ON(!msm_obj->domain[id].iova); WARN_ON(!msm_obj->domain[id].iova);
return msm_obj->domain[id].iova; return msm_obj->domain[id].iova;
} }
void msm_gem_put_iova(struct drm_gem_object *obj, int id) void msm_gem_put_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{ {
// XXX TODO .. // XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't // NOTE: probably don't need a _locked() version.. we wouldn't
......
...@@ -33,6 +33,7 @@ struct msm_gem_address_space { ...@@ -33,6 +33,7 @@ struct msm_gem_address_space {
struct drm_mm mm; struct drm_mm mm;
struct msm_mmu *mmu; struct msm_mmu *mmu;
struct kref kref; struct kref kref;
int id; /* temporary */
}; };
struct msm_gem_vma { struct msm_gem_vma {
......
...@@ -158,7 +158,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) ...@@ -158,7 +158,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED) if (submit->bos[i].flags & BO_PINNED)
msm_gem_put_iova(&msm_obj->base, submit->gpu->id); msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED) if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock); ww_mutex_unlock(&msm_obj->resv->lock);
...@@ -246,7 +246,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) ...@@ -246,7 +246,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
/* if locking succeeded, pin bo: */ /* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base, ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova); submit->gpu->aspace, &iova);
if (ret) if (ret)
break; break;
......
...@@ -416,7 +416,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) ...@@ -416,7 +416,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj; struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */ /* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base); msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_put_iova(&msm_obj->base, gpu->id); msm_gem_put_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_unreference(&msm_obj->base); drm_gem_object_unreference(&msm_obj->base);
} }
...@@ -498,7 +498,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, ...@@ -498,7 +498,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */ /* submit takes a reference to the bo and iova until retired: */
drm_gem_object_reference(&msm_obj->base); drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base, msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova); submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
...@@ -694,7 +694,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) ...@@ -694,7 +694,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->rb) { if (gpu->rb) {
if (gpu->rb_iova) if (gpu->rb_iova)
msm_gem_put_iova(gpu->rb->bo, gpu->id); msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
msm_ringbuffer_destroy(gpu->rb); msm_ringbuffer_destroy(gpu->rb);
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册