Commit 1d5494e9 authored by Cihangir Akturk, committed by Eric Anholt

drm/vc4: switch to drm_*{get,put} helpers

The drm_*_reference() and drm_*_unreference() functions are just
compatibility aliases for drm_*_get() and drm_*_put() and should not be
used by new code. So convert all users of the compatibility functions to
use the new APIs.
Signed-off-by: Cihangir Akturk <cakturk@gmail.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/1501761585-11757-26-git-send-email-cakturk@gmail.com
Reviewed-by: Eric Anholt <eric@anholt.net>
Parent 97ee731d
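For context, the legacy names are thin wrappers around the new helpers, so the conversion below is purely mechanical. A minimal sketch of how the compatibility aliases were defined at the time (simplified; the exact definitions live in include/drm/drm_gem.h and include/drm/drm_framebuffer.h and may differ in detail):

/* Legacy names simply forward to the new reference-counting helpers. */
static inline void drm_gem_object_reference(struct drm_gem_object *obj)
{
	drm_gem_object_get(obj);		/* take a reference */
}

static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	drm_gem_object_put_unlocked(obj);	/* drop a reference without struct_mutex held */
}

static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
	drm_framebuffer_get(fb);
}

static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
	drm_framebuffer_put(fb);
}

Each hunk below therefore replaces one legacy call with its direct equivalent; locking and object lifetimes are unchanged.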
@@ -366,7 +366,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
 		return PTR_ERR(bo);
 
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
-	drm_gem_object_unreference_unlocked(&bo->base.base);
+	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
 }
@@ -581,7 +581,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(bo);
 
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
-	drm_gem_object_unreference_unlocked(&bo->base.base);
+	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
 }
@@ -601,7 +601,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
 	/* The mmap offset was set up at BO allocation time. */
 	args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return 0;
 }
@@ -657,7 +657,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
 	ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
 
 fail:
-	drm_gem_object_unreference_unlocked(&bo->base.base);
+	drm_gem_object_put_unlocked(&bo->base.base);
 
 	return ret;
 }
@@ -704,7 +704,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
 	bo = to_vc4_bo(gem_obj);
 	bo->t_format = t_format;
 
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return 0;
 }
@@ -739,7 +739,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
 	else
 		args->modifier = DRM_FORMAT_MOD_NONE;
 
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return 0;
 }
@@ -763,7 +763,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 	}
 
 	drm_crtc_vblank_put(crtc);
-	drm_framebuffer_unreference(flip_state->fb);
+	drm_framebuffer_put(flip_state->fb);
 	kfree(flip_state);
 
 	up(&vc4->async_modeset);
@@ -792,7 +792,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 	if (!flip_state)
 		return -ENOMEM;
 
-	drm_framebuffer_reference(fb);
+	drm_framebuffer_get(fb);
 	flip_state->fb = fb;
 	flip_state->crtc = crtc;
 	flip_state->event = event;
@@ -800,7 +800,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
 	/* Make sure all other async modesetes have landed. */
 	ret = down_interruptible(&vc4->async_modeset);
 	if (ret) {
-		drm_framebuffer_unreference(fb);
+		drm_framebuffer_put(fb);
 		kfree(flip_state);
 		return ret;
 	}
@@ -55,7 +55,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
 	unsigned int i;
 
 	for (i = 0; i < state->user_state.bo_count; i++)
-		drm_gem_object_unreference_unlocked(state->bo[i]);
+		drm_gem_object_put_unlocked(state->bo[i]);
 
 	kfree(state);
 }
@@ -188,12 +188,12 @@ vc4_save_hang_state(struct drm_device *dev)
 			continue;
 
 		for (j = 0; j < exec[i]->bo_count; j++) {
-			drm_gem_object_reference(&exec[i]->bo[j]->base);
+			drm_gem_object_get(&exec[i]->bo[j]->base);
 			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
 		}
 
 		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
-			drm_gem_object_reference(&bo->base.base);
+			drm_gem_object_get(&bo->base.base);
 			kernel_state->bo[j + prev_idx] = &bo->base.base;
 			j++;
 		}
@@ -696,7 +696,7 @@ vc4_cl_lookup_bos(struct drm_device *dev,
 			spin_unlock(&file_priv->table_lock);
 			goto fail;
 		}
-		drm_gem_object_reference(bo);
+		drm_gem_object_get(bo);
 		exec->bo[i] = (struct drm_gem_cma_object *)bo;
 	}
 	spin_unlock(&file_priv->table_lock);
@@ -834,7 +834,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 
 	if (exec->bo) {
 		for (i = 0; i < exec->bo_count; i++)
-			drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
+			drm_gem_object_put_unlocked(&exec->bo[i]->base);
 		kvfree(exec->bo);
 	}
 
@@ -842,7 +842,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
 		struct vc4_bo *bo = list_first_entry(&exec->unref_list,
 						     struct vc4_bo, unref_head);
 		list_del(&bo->unref_head);
-		drm_gem_object_unreference_unlocked(&bo->base.base);
+		drm_gem_object_put_unlocked(&bo->base.base);
 	}
 
 	/* Free up the allocation of any bin slots we used. */
@@ -981,7 +981,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
 	ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
 					      &args->timeout_ns);
 
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return ret;
 }
@@ -184,7 +184,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
 			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
 		}
 
-		drm_gem_object_unreference_unlocked(gem_obj);
+		drm_gem_object_put_unlocked(gem_obj);
 
 		mode_cmd = &mode_cmd_local;
 	}