Commit 6d7c2b4c authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon/kms: Make GPU/CPU page size handling consistent in blit code (v2)
  drm/radeon/kms: fix typo in r100_blit_copy
  drm/radeon: Unreference GEM object outside of spinlock in page flip error path.
  drm/radeon: Don't read from CP ring write pointer registers.
  drm/ttm: request zeroed system memory pages for new TT buffer objects
@@ -1404,7 +1404,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -1426,7 +1427,6 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	evergreen_cp_start(rdev);
 	rdev->cp.ready = true;
@@ -3171,21 +3171,23 @@ int evergreen_suspend(struct radeon_device *rdev)
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence)
+			uint64_t src_offset,
+			uint64_t dst_offset,
+			unsigned num_gpu_pages,
+			struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	evergreen_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
...
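The write-pointer changes above recur in the cayman, r100 and r600 hunks below: instead of reading the write pointer back from CP_RB_WPTR, the driver now seeds its own copy (rdev->cp.wptr = 0) and only ever writes the register. The workaround removed from r100_cp_init ("protect against crazy HW on resume") hints at why: the value read back cannot be trusted. A minimal standalone sketch of the pattern, with a stub write_reg() standing in for radeon's WREG32():

#include <stdint.h>
#include <stdio.h>

/* Stub MMIO write, standing in for WREG32(). */
static void write_reg(const char *reg, uint32_t val)
{
	printf("%s <- 0x%08x\n", reg, val);
}

struct ring {
	uint32_t wptr;     /* driver-owned copy of the write pointer */
	uint32_t ptr_mask; /* ring size in dwords, minus one */
};

/* On (re)init, seed the copy and push it to hardware, mirroring
 * rdev->cp.wptr = 0; WREG32(CP_RB_WPTR, rdev->cp.wptr); */
static void ring_reset(struct ring *ring)
{
	ring->wptr = 0;
	write_reg("CP_RB_WPTR", ring->wptr);
}

/* After queuing ndw dwords, advance the copy; the register is
 * never read back. */
static void ring_commit(struct ring *ring, uint32_t ndw)
{
	ring->wptr = (ring->wptr + ndw) & ring->ptr_mask;
	write_reg("CP_RB_WPTR", ring->wptr);
}

int main(void)
{
	struct ring r = { .wptr = 0, .ptr_mask = (1u << 20) - 1 };
	ring_reset(&r);
	ring_commit(&r, 64);
	return 0;
}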
@@ -1187,7 +1187,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB0_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB0_WPTR, rdev->cp.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1207,7 +1208,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
 	rdev->cp.rptr = RREG32(CP_RB0_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
 	/* ring1 - compute only */
 	/* Set ring buffer size */
@@ -1220,7 +1220,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB1_WPTR, 0);
+	rdev->cp1.wptr = 0;
+	WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1232,7 +1233,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
 	rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
-	rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
 	/* ring2 - compute only */
 	/* Set ring buffer size */
@@ -1245,7 +1245,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
-	WREG32(CP_RB2_WPTR, 0);
+	rdev->cp2.wptr = 0;
+	WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
 
 	/* set the wb address wether it's enabled or not */
 	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1257,7 +1258,6 @@ int cayman_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
 	rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
-	rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
 	/* start the rings */
 	cayman_cp_start(rdev);
...
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
-	uint32_t stride_bytes = PAGE_SIZE;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device *rdev,
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
-	num_loops = DIV_ROUND_UP(num_pages, 8191);
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device *rdev,
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
-	while (num_pages > 0) {
-		cur_pages = num_pages;
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
-		num_pages -= cur_pages;
+		num_gpu_pages -= cur_pages;
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev,
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
 		radeon_ring_write(rdev, 0);
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
-		radeon_ring_write(rdev, num_pages);
-		radeon_ring_write(rdev, num_pages);
+		radeon_ring_write(rdev, cur_pages);
+		radeon_ring_write(rdev, cur_pages);
 		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
 	}
 	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
@@ -990,7 +990,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	/* Force read & write ptr to 0 */
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
-	WREG32(RADEON_CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1007,9 +1008,6 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
-	/* protect against crazy HW on resume */
-	rdev->cp.wptr &= rdev->cp.ptr_mask;
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
...
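Two fixes meet in r100_copy_blit above: blit sizes are now computed in GPU pages (RADEON_GPU_PAGE_SIZE) rather than the CPU's PAGE_SIZE, and the ring packet now emits cur_pages, the chunk handled in this pass, where the typo had emitted the full remaining num_pages as the copy height. A standalone sketch of the chunking arithmetic, with printf standing in for radeon_ring_write():

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_PAGES_PER_PASS 8191 /* hardware limit per blit packet */

/* Walk a copy of num_gpu_pages pages in passes of at most 8191
 * pages, mirroring the loop structure of r100_copy_blit. */
static void emit_blit(unsigned num_gpu_pages)
{
	unsigned num_loops = DIV_ROUND_UP(num_gpu_pages, MAX_PAGES_PER_PASS);
	printf("%u pass(es) needed\n", num_loops);

	while (num_gpu_pages > 0) {
		unsigned cur_pages = num_gpu_pages;
		if (cur_pages > MAX_PAGES_PER_PASS)
			cur_pages = MAX_PAGES_PER_PASS;
		num_gpu_pages -= cur_pages;
		/* the packet must describe THIS pass, hence cur_pages */
		printf("blit %u pages, %u left\n", cur_pages, num_gpu_pages);
	}
}

int main(void)
{
	emit_blit(20000); /* 3 passes: 8191 + 8191 + 3618 */
	return 0;
}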
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
 int r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
-		  unsigned num_pages,
+		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
 	uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *rdev,
 	int r = 0;
 
 	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
 	if (r) {
...
@@ -2209,7 +2209,8 @@ int r600_cp_resume(struct radeon_device *rdev)
 	/* Initialize the ring buffer's read and write pointers */
 	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
 	WREG32(CP_RB_RPTR_WR, 0);
-	WREG32(CP_RB_WPTR, 0);
+	rdev->cp.wptr = 0;
+	WREG32(CP_RB_WPTR, rdev->cp.wptr);
 
 	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
@@ -2231,7 +2232,6 @@ int r600_cp_resume(struct radeon_device *rdev)
 	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
 	rdev->cp.rptr = RREG32(CP_RB_RPTR);
-	rdev->cp.wptr = RREG32(CP_RB_WPTR);
 
 	r600_cp_start(rdev);
 	rdev->cp.ready = true;
@@ -2353,21 +2353,23 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence)
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
...
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 	int (*copy_dma)(struct radeon_device *rdev,
 			uint64_t src_offset,
 			uint64_t dst_offset,
-			unsigned num_pages,
+			unsigned num_gpu_pages,
 			struct radeon_fence *fence);
 	int (*copy)(struct radeon_device *rdev,
 		    uint64_t src_offset,
 		    uint64_t dst_offset,
-		    unsigned num_pages,
+		    unsigned num_gpu_pages,
 		    struct radeon_fence *fence);
 	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
...
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence);
+		   unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence);
+			unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
...
@@ -473,8 +473,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 	spin_lock_irqsave(&dev->event_lock, flags);
 	radeon_crtc->unpin_work = NULL;
 unlock_free:
-	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
+	drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
 	radeon_fence_unref(&work->fence);
 	kfree(work);
...
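The page-flip hunk above moves drm_gem_object_unreference_unlocked() below spin_unlock_irqrestore(): dropping what may be the last reference takes a mutex and can therefore sleep, which is not allowed while holding a spinlock with interrupts disabled. The shape of the fix, sketched with stub helpers (names hypothetical):

#include <stdio.h>

/* Stubs; in the kernel these are spin_lock_irqsave(),
 * spin_unlock_irqrestore() and drm_gem_object_unreference_unlocked(). */
static void lock_irqsave(void)      { printf("spinlock taken, irqs off\n"); }
static void unlock_irqrestore(void) { printf("spinlock dropped, irqs on\n"); }
static void unreference(void)       { printf("unref: may take a mutex and sleep\n"); }

static void flip_error_path(void)
{
	lock_irqsave();
	/* ... roll back the pending flip state ... */
	unlock_irqrestore();
	unreference(); /* the fix: only after the spinlock is released */
}

int main(void)
{
	flip_error_path();
	return 0;
}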
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
-	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
...
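TTM counts buffers in CPU pages (PAGE_SIZE, 4 KiB on x86 but larger on some architectures), while the radeon copy hooks now take 4 KiB GPU pages, so radeon_move_blit above scales the count; the BUILD_BUG_ON encodes the assumption that PAGE_SIZE is a whole multiple of RADEON_GPU_PAGE_SIZE. The same arithmetic as a standalone check (PAGE_SIZE hardcoded here purely for illustration):

#include <assert.h>
#include <stdio.h>

#define RADEON_GPU_PAGE_SIZE 4096UL /* fixed by the GPU */
#define PAGE_SIZE 65536UL           /* e.g. a 64 KiB-page CPU */

int main(void)
{
	unsigned long num_cpu_pages = 8;

	/* the invariant the kernel enforces at compile time via BUILD_BUG_ON */
	assert(PAGE_SIZE % RADEON_GPU_PAGE_SIZE == 0);

	unsigned long num_gpu_pages =
		num_cpu_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	printf("%lu CPU pages = %lu GPU pages\n", num_cpu_pages, num_gpu_pages);
	/* prints: 8 CPU pages = 128 GPU pages */
	return 0;
}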
@@ -394,7 +394,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
 		if (bo->ttm == NULL) {
-			ret = ttm_bo_add_ttm(bo, false);
+			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
+			ret = ttm_bo_add_ttm(bo, zero);
 			if (ret)
 				goto out_err;
 		}
...
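The ttm_bo.c hunk keys page zeroing off the old placement: pages backing a BO that is migrating out of a fixed placement (e.g. VRAM) are about to be overwritten by the copy, so clearing them first would be wasted work, whereas a BO receiving system pages for the first time must get zeroed memory or stale data could leak to userspace. The predicate, sketched with a hypothetical flag value:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical value; "fixed" marks placements with fixed pages (e.g. VRAM). */
#define TTM_MEMTYPE_FLAG_FIXED (1 << 0)

/* Zero new TT pages unless they are about to be overwritten by a
 * copy from a fixed placement -- the logic of the ttm_bo.c change. */
static bool need_zeroed_pages(unsigned old_flags)
{
	return !(old_flags & TTM_MEMTYPE_FLAG_FIXED);
}

int main(void)
{
	printf("eviction from VRAM: zero=%d\n",
	       need_zeroed_pages(TTM_MEMTYPE_FLAG_FIXED));
	printf("brand-new BO:       zero=%d\n", need_zeroed_pages(0));
	return 0;
}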