diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b223c8d0a491b821428d7b422e1f546c53aa123a..5d84a66fed3638144917124a02c68dfd420a2baa 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -458,7 +458,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 	trace_dma_fence_emit(&release->base);
 
 	driver = bdev->driver;
-	glob = bo->glob;
+	glob = bdev->glob;
 
 	spin_lock(&glob->lru_lock);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 2bde37291e3eab8442d1566a89daacff1b7c82ac..fe4aef6b1a7a0750f1654373eef836e113cc3402 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -149,7 +149,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
-	atomic_dec(&bo->glob->bo_count);
+	atomic_dec(&bo->bdev->glob->bo_count);
 	dma_fence_put(bo->moving);
 	reservation_object_fini(&bo->ttm_resv);
 	mutex_destroy(&bo->wu_mutex);
@@ -174,7 +174,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 		if (bo->ttm && !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
 					     TTM_PAGE_FLAG_SWAPPED))) {
 			list_add_tail(&bo->swap,
-				      &bo->glob->swap_lru[bo->priority]);
+				      &bdev->glob->swap_lru[bo->priority]);
 			kref_get(&bo->list_kref);
 		}
 	}
@@ -205,9 +205,11 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 
 void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 {
-	spin_lock(&bo->glob->lru_lock);
+	struct ttm_bo_global *glob = bo->bdev->glob;
+
+	spin_lock(&glob->lru_lock);
 	ttm_bo_del_from_lru(bo);
-	spin_unlock(&bo->glob->lru_lock);
+	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
@@ -226,7 +228,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bdev->glob;
 	int ret = 0;
 	uint32_t page_flags = 0;
 
@@ -429,7 +431,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bdev->glob;
 	int ret;
 
 	ret = ttm_bo_individualize_resv(bo);
@@ -500,7 +502,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool interruptible, bool no_wait_gpu,
 			       bool unlock_resv)
 {
-	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_bo_global *glob = bo->bdev->glob;
 	struct reservation_object *resv;
 	int ret;
 
@@ -1191,7 +1193,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
-	bo->glob = bdev->glob;
 	bo->type = type;
 	bo->num_pages = num_pages;
 	bo->mem.size = num_pages << PAGE_SHIFT;
@@ -1213,7 +1214,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		bo->resv = &bo->ttm_resv;
 	}
 	reservation_object_init(&bo->ttm_resv);
-	atomic_inc(&bo->glob->bo_count);
+	atomic_inc(&bo->bdev->glob->bo_count);
 	drm_vma_node_reset(&bo->vma_node);
 	bo->priority = 0;
 
@@ -1246,9 +1247,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 	}
 
 	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->glob->lru_lock);
+		spin_lock(&bdev->glob->lru_lock);
 		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->glob->lru_lock);
+		spin_unlock(&bdev->glob->lru_lock);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 38da6903cae9e6c53bb889ae6ce2a649c20005cf..6d6a3f46143be30f404ebbcd383ffe6bce03ffb3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -470,7 +470,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	atomic_inc(&bo->glob->bo_count);
+	atomic_inc(&bo->bdev->glob->bo_count);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index fa44f7b152859ae8e001204163ff0de9883a8200..3dca206e85f709314dcc36e11db01d6e2b20afd9 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -62,7 +62,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 		return;
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->glob;
+	glob = entry->bo->bdev->glob;
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(entry, list, head) {
@@ -102,7 +102,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		return 0;
 
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->glob;
+	glob = entry->bo->bdev->glob;
 
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
@@ -194,7 +194,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
 	bdev = bo->bdev;
 	driver = bdev->driver;
-	glob = bo->glob;
+	glob = bo->bdev->glob;
 
 	spin_lock(&glob->lru_lock);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 21426395820c749c9d04bb3964f11947c96cbb1f..a9e0640849d860e4a7fcdd10bbd6cff8224813bd 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -41,6 +41,8 @@
 #include <linux/bitmap.h>
 #include <linux/reservation.h>
 
+struct ttm_bo_global;
+
 struct ttm_bo_device;
 
 struct drm_mm_node;
@@ -169,7 +171,6 @@ struct ttm_buffer_object {
 	 * Members constant at init.
 	 */
 
-	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 2bac25a6cf9017f33818f89f1a001952dbb32e37..738bb8d35c44112d576fef09ecc73939ffdfa4ba 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -956,9 +956,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bo->glob->lru_lock);
+		spin_lock(&bo->bdev->glob->lru_lock);
 		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bo->glob->lru_lock);
+		spin_unlock(&bo->bdev->glob->lru_lock);
 	}
 	reservation_object_unlock(bo->resv);
 }
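
Driver-facing note, with a small sketch: struct ttm_buffer_object no longer carries its own ttm_bo_global pointer, so any code that used to dereference bo->glob now has to go through bo->bdev->glob. The helper below is hypothetical (the my_driver_bo_on_swap_lru name is made up for illustration and is not part of this patch); only the bo->bdev->glob access pattern and the lru_lock usage come from the hunks above.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/*
 * Hypothetical out-of-tree helper, for illustration only: check whether a
 * BO currently sits on the global swap LRU.  The global TTM state is now
 * reached through the device, not through the BO itself.
 */
static bool my_driver_bo_on_swap_lru(struct ttm_buffer_object *bo)
{
	/* Before this patch: struct ttm_bo_global *glob = bo->glob; */
	struct ttm_bo_global *glob = bo->bdev->glob;
	bool on_swap_lru;

	spin_lock(&glob->lru_lock);
	on_swap_lru = !list_empty(&bo->swap);
	spin_unlock(&glob->lru_lock);

	return on_swap_lru;
}

The explicit forward declaration of struct ttm_bo_global added to ttm_bo_api.h is presumably needed because the header still refers to the type elsewhere (e.g. in function prototypes) once the member that previously mentioned it is gone.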