Commit ded5107e authored by Dave Airlie

Merge branch 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

More fixes for radeon.  This adds new queries for tiling on CIK, and
fixes a crash in handling acpi atif backlight events on CIK.
Some fixes for radeon for 3.13.  Mostly CI stability fixes.  I think
I've tracked down the stability problems with dpm on Trinity/Richland,
so I'm going to enable that by default now.

* 'drm-next-3.13' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: hook up backlight functions for CI and KV family.
  drm/radeon/cik: Add macrotile mode array query
  drm/radeon/cik: Return backend map information to userspace
  drm/radeon: enable DPM by default in TN asics
  drm/radeon: adjust TN dpm parameters for stability (v2)
  drm/radeon: use a single doorbell for cik kms compute
  drm/radeon/vm: don't attempt to update ptes if ib allocation fails
  drm/radeon: disable CIK CP semaphores for now
  drm/radeon: allow semaphore emission to fail
  drm/radeon: add semaphore trace point
  radeon: workaround pinning failure on low ram gpu
  radeon/i2c: do not count reg index in number of i2c byte we are writing.
  drm/radeon: cypress_dpm: Fix unused variable warning when CONFIG_ACPI=n
  drm: radeon: ni_dpm: Fix unused variable warning when CONFIG_ACPI=n
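
The first three items in the list are userspace-visible: they extend the DRM_RADEON_INFO ioctl. As a rough usage sketch only (not code from this merge; include paths and error handling are simplified, and drmCommandWriteRead() is the stock libdrm helper), a client could read the new 16-entry CIK macrotile mode array like this:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>          /* drmCommandWriteRead() */
#include <drm/radeon_drm.h>   /* include path varies by distro */

/* Sketch: query the 16-entry CIK macrotile mode array added by this merge.
 * 'fd' is an open radeon DRM device node; returns 0 on success or a
 * negative errno (e.g. -EINVAL on pre-CIK hardware). */
static int radeon_query_macrotile(int fd, uint32_t macrotile[16])
{
	struct drm_radeon_info info;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_CIK_MACROTILE_MODE_ARRAY;
	info.value = (uintptr_t)macrotile; /* kernel copies 16 dwords back here */

	return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
}

For array-style requests the kernel copies the result back through the pointer passed in the value field; the same ioctl with RADEON_INFO_BACKEND_MAP now returns rdev->config.cik.backend_map on Bonaire and newer instead of -EINVAL, as the radeon_info_ioctl() hunk below shows.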
@@ -56,8 +56,10 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
 			return -EINVAL;
 		}
 		args.ucRegIndex = buf[0];
-		if (num > 1)
-			memcpy(&out, &buf[1], num - 1);
+		if (num > 1) {
+			num--;
+			memcpy(&out, &buf[1], num);
+		}
 		args.lpI2CDataOut = cpu_to_le16(out);
 	} else {
 		if (num > ATOM_MAX_HW_I2C_READ) {
...
@@ -1560,17 +1560,17 @@ u32 cik_get_xclk(struct radeon_device *rdev)
  * cik_mm_rdoorbell - read a doorbell dword
  *
  * @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
  *
  * Returns the value in the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
  */
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index)
 {
-	if (offset < rdev->doorbell.size) {
-		return readl(((void __iomem *)rdev->doorbell.ptr) + offset);
+	if (index < rdev->doorbell.num_doorbells) {
+		return readl(rdev->doorbell.ptr + index);
 	} else {
-		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", offset);
+		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
 		return 0;
 	}
 }
@@ -1579,18 +1579,18 @@ u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset)
  * cik_mm_wdoorbell - write a doorbell dword
  *
  * @rdev: radeon_device pointer
- * @offset: byte offset into the aperture
+ * @index: doorbell index
  * @v: value to write
  *
  * Writes @v to the doorbell aperture at the
- * requested offset (CIK).
+ * requested doorbell index (CIK).
  */
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v)
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v)
 {
-	if (offset < rdev->doorbell.size) {
-		writel(v, ((void __iomem *)rdev->doorbell.ptr) + offset);
+	if (index < rdev->doorbell.num_doorbells) {
+		writel(v, rdev->doorbell.ptr + index);
 	} else {
-		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", offset);
+		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
 	}
 }
@@ -2427,6 +2427,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 4) {
@@ -2773,6 +2774,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else if (num_pipe_configs == 2) {
@@ -2990,6 +2992,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
 				gb_tile_moden = 0;
 				break;
 			}
+			rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
 			WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
 		}
 	} else
@@ -3556,17 +3559,24 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, 0);
 }
 
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
 {
+/* TODO: figure out why semaphore cause lockups */
+#if 0
 	uint64_t addr = semaphore->gpu_addr;
 	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
 	radeon_ring_write(ring, addr & 0xffffffff);
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel);
+
+	return true;
+#else
+	return false;
+#endif
 }
 
 /**
@@ -3609,13 +3619,8 @@ int cik_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
@@ -4052,7 +4057,7 @@ void cik_compute_ring_set_wptr(struct radeon_device *rdev,
 			       struct radeon_ring *ring)
 {
 	rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
-	WDOORBELL32(ring->doorbell_offset, ring->wptr);
+	WDOORBELL32(ring->doorbell_index, ring->wptr);
 }
 
 /**
@@ -4393,10 +4398,6 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 			return r;
 		}
 
-		/* doorbell offset */
-		rdev->ring[idx].doorbell_offset =
-			(rdev->ring[idx].doorbell_page_num * PAGE_SIZE) + 0;
-
 		/* init the mqd struct */
 		memset(buf, 0, sizeof(struct bonaire_mqd));
@@ -4508,7 +4509,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
 			RREG32(CP_HQD_PQ_DOORBELL_CONTROL);
 		mqd->queue_state.cp_hqd_pq_doorbell_control &= ~DOORBELL_OFFSET_MASK;
 		mqd->queue_state.cp_hqd_pq_doorbell_control |=
-			DOORBELL_OFFSET(rdev->ring[idx].doorbell_offset / 4);
+			DOORBELL_OFFSET(rdev->ring[idx].doorbell_index);
 		mqd->queue_state.cp_hqd_pq_doorbell_control |= DOORBELL_EN;
 		mqd->queue_state.cp_hqd_pq_doorbell_control &=
 			~(DOORBELL_SOURCE | DOORBELL_HIT);
@@ -7839,14 +7840,14 @@ int cik_init(struct radeon_device *rdev)
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
-	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
 	if (r)
 		return r;
 
 	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
 	ring->ring_obj = NULL;
 	r600_ring_init(rdev, ring, 1024 * 1024);
-	r = radeon_doorbell_get(rdev, &ring->doorbell_page_num);
+	r = radeon_doorbell_get(rdev, &ring->doorbell_index);
 	if (r)
 		return r;
...
@@ -130,7 +130,7 @@ void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
  * Add a DMA semaphore packet to the ring wait on or signal
  * other rings (CIK).
  */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait)
@@ -141,6 +141,8 @@ void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
 	radeon_ring_write(ring, addr & 0xfffffff8);
 	radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+
+	return true;
 }
 
 /**
@@ -443,13 +445,8 @@ int cik_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
...
@@ -299,7 +299,9 @@ void cypress_program_response_times(struct radeon_device *rdev)
 static int cypress_pcie_performance_request(struct radeon_device *rdev,
 					    u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+#endif
 	u32 tmp;
 
 	udelay(10);
...
@@ -131,13 +131,8 @@ int evergreen_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
...
@@ -3445,9 +3445,9 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
 static int ni_pcie_performance_request(struct radeon_device *rdev,
 				       u8 perf_req, bool advertise)
 {
+#if defined(CONFIG_ACPI)
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
 
-#if defined(CONFIG_ACPI)
 	if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) ||
 	    (perf_req == PCIE_PERF_REQ_PECI_GEN2)) {
 		if (eg_pi->pcie_performance_request_registered == false)
...
@@ -869,13 +869,14 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
 {
 	/* Unused on older asics, since we don't have semaphores or multiple rings */
 	BUG();
+	return false;
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
...
@@ -2650,7 +2650,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	}
 }
 
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
@@ -2664,6 +2664,8 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
 	radeon_ring_write(ring, addr & 0xffffffff);
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+
+	return true;
 }
 
 /**
@@ -2706,13 +2708,8 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
...
@@ -311,7 +311,7 @@ void r600_dma_fence_ring_emit(struct radeon_device *rdev,
  * Add a DMA semaphore packet to the ring wait on or signal
  * other rings (r6xx-SI).
  */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait)
@@ -322,6 +322,8 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
 	radeon_ring_write(ring, addr & 0xfffffffc);
 	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+
+	return true;
 }
 
 /**
@@ -462,13 +464,8 @@ int r600_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
...
@@ -348,6 +348,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring)
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
 int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
+int radeon_fence_wait_locked(struct radeon_fence *fence);
 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
 int radeon_fence_wait_any(struct radeon_device *rdev,
@@ -548,17 +549,20 @@ struct radeon_semaphore {
 	struct radeon_sa_bo	*sa_bo;
 	signed			waiters;
 	uint64_t		gpu_addr;
+	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
 };
 
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore);
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
 				  struct radeon_semaphore *semaphore);
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
 				struct radeon_semaphore *semaphore);
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+			      struct radeon_fence *fence);
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
-				int signaler, int waiter);
+				int waiting_ring);
 void radeon_semaphore_free(struct radeon_device *rdev,
 			   struct radeon_semaphore **semaphore,
 			   struct radeon_fence *fence);
@@ -645,13 +649,15 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
 /*
  * GPU doorbell structures, functions & helpers
  */
+#define RADEON_MAX_DOORBELLS 1024	/* Reserve at most 1024 doorbell slots for radeon-owned rings. */
+
 struct radeon_doorbell {
-	u32			num_pages;
-	bool			free[1024];
 	/* doorbell mmio */
 	resource_size_t		base;
 	resource_size_t		size;
-	void __iomem		*ptr;
+	u32 __iomem		*ptr;
+	u32			num_doorbells;	/* Number of doorbells actually reserved for radeon. */
+	unsigned long		used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)];
 };
 
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
@@ -765,7 +771,6 @@ struct radeon_ib {
 	struct radeon_fence	*fence;
 	struct radeon_vm	*vm;
 	bool			is_const_ib;
-	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
 	struct radeon_semaphore	*semaphore;
 };
@@ -799,8 +804,7 @@ struct radeon_ring {
 	u32			pipe;
 	u32			queue;
 	struct radeon_bo	*mqd_obj;
-	u32			doorbell_page_num;
-	u32			doorbell_offset;
+	u32			doorbell_index;
 	unsigned		wptr_offs;
 };
@@ -921,7 +925,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, struct radeon_vm *vm,
 		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		       struct radeon_ib *const_ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1638,7 +1641,7 @@ struct radeon_asic_ring {
 	/* command emmit functions */
 	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
 	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
-	void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
 			       struct radeon_semaphore *semaphore, bool emit_wait);
 	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1979,6 +1982,7 @@ struct cik_asic {
 	unsigned tile_config;
 	uint32_t tile_mode_array[32];
+	uint32_t macrotile_mode_array[16];
 };
 
 union radeon_asic_config {
@@ -2239,8 +2243,8 @@ void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 offset);
-void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
+u32 cik_mm_rdoorbell(struct radeon_device *rdev, u32 index);
+void cik_mm_wdoorbell(struct radeon_device *rdev, u32 index, u32 v);
 
 /*
  * Cast helper
@@ -2303,8 +2307,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
 #define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
 #define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
-#define RDOORBELL32(offset) cik_mm_rdoorbell(rdev, (offset))
-#define WDOORBELL32(offset, v) cik_mm_wdoorbell(rdev, (offset), (v))
+#define RDOORBELL32(index) cik_mm_rdoorbell(rdev, (index))
+#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v))
 
 /*
  * Indirect registers accessor
...
@@ -2015,6 +2015,8 @@ static struct radeon_asic ci_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 		.hdmi_enable = &evergreen_hdmi_enable,
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
@@ -2114,6 +2116,8 @@ static struct radeon_asic kv_asic = {
 		.bandwidth_update = &dce8_bandwidth_update,
 		.get_vblank_counter = &evergreen_get_vblank_counter,
 		.wait_for_vblank = &dce4_wait_for_vblank,
+		.set_backlight_level = &atombios_set_backlight_level,
+		.get_backlight_level = &atombios_get_backlight_level,
 		.hdmi_enable = &evergreen_hdmi_enable,
 		.hdmi_setmode = &evergreen_hdmi_setmode,
 	},
...
@@ -80,7 +80,7 @@ int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
 void r100_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait);
@@ -313,13 +313,13 @@ int r600_cs_parse(struct radeon_cs_parser *p);
 int r600_dma_cs_parse(struct radeon_cs_parser *p);
 void r600_fence_ring_emit(struct radeon_device *rdev,
 			  struct radeon_fence *fence);
-void r600_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *cp,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait);
 void r600_dma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait);
@@ -566,10 +566,6 @@ int sumo_dpm_force_performance_level(struct radeon_device *rdev,
  */
 void cayman_fence_ring_emit(struct radeon_device *rdev,
 			    struct radeon_fence *fence);
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
-			       struct radeon_ring *ring,
-			       struct radeon_semaphore *semaphore,
-			       bool emit_wait);
 void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
 int cayman_init(struct radeon_device *rdev);
 void cayman_fini(struct radeon_device *rdev);
@@ -697,7 +693,7 @@ void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
 			      struct radeon_fence *fence);
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
 				  struct radeon_ring *ring,
 				  struct radeon_semaphore *semaphore,
 				  bool emit_wait);
@@ -717,7 +713,7 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
 			     struct radeon_fence *fence);
 void cik_fence_compute_ring_emit(struct radeon_device *rdev,
 				 struct radeon_fence *fence);
-void cik_semaphore_ring_emit(struct radeon_device *rdev,
+bool cik_semaphore_ring_emit(struct radeon_device *rdev,
 			     struct radeon_ring *cp,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
@@ -807,7 +803,7 @@ void uvd_v1_0_stop(struct radeon_device *rdev);
 int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
@@ -819,7 +815,7 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
 			 struct radeon_fence *fence);
 
 /* uvd v3.1 */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait);
...
@@ -159,7 +159,8 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 		if (!p->relocs[i].robj)
 			continue;
 
-		radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
+		radeon_semaphore_sync_to(p->ib.semaphore,
+					 p->relocs[i].robj->tbo.sync_obj);
 	}
 }
@@ -411,9 +412,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		goto out;
 	}
 	radeon_cs_sync_rings(parser);
-	radeon_ib_sync_to(&parser->ib, vm->fence);
-	radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
-		rdev, vm, parser->ring));
+	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);
+	radeon_semaphore_sync_to(parser->ib.semaphore,
+				 radeon_vm_grab_id(rdev, vm, parser->ring));
 
 	if ((rdev->family >= CHIP_TAHITI) &&
 	    (parser->chunk_const_ib_idx != -1)) {
...
@@ -251,28 +251,23 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 int radeon_doorbell_init(struct radeon_device *rdev)
 {
-	int i;
-
 	/* doorbell bar mapping */
 	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
 	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
-	/* limit to 4 MB for now */
-	if (rdev->doorbell.size > (4 * 1024 * 1024))
-		rdev->doorbell.size = 4 * 1024 * 1024;
-	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
+	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
+	if (rdev->doorbell.num_doorbells == 0)
+		return -EINVAL;
+	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
 	if (rdev->doorbell.ptr == NULL) {
 		return -ENOMEM;
 	}
 	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
 	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
 
-	rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
-	for (i = 0; i < rdev->doorbell.num_pages; i++) {
-		rdev->doorbell.free[i] = true;
-	}
+	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
 
 	return 0;
 }
@@ -290,40 +285,38 @@ void radeon_doorbell_fini(struct radeon_device *rdev)
 }
 
 /**
- * radeon_doorbell_get - Allocate a doorbell page
+ * radeon_doorbell_get - Allocate a doorbell entry
  *
  * @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
  *
- * Allocate a doorbell page for use by the driver (all asics).
+ * Allocate a doorbell for use by the driver (all asics).
  * Returns 0 on success or -EINVAL on failure.
  */
 int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
 {
-	int i;
-
-	for (i = 0; i < rdev->doorbell.num_pages; i++) {
-		if (rdev->doorbell.free[i]) {
-			rdev->doorbell.free[i] = false;
-			*doorbell = i;
-			return 0;
-		}
+	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
+	if (offset < rdev->doorbell.num_doorbells) {
+		__set_bit(offset, rdev->doorbell.used);
+		*doorbell = offset;
+		return 0;
+	} else {
+		return -EINVAL;
 	}
-	return -EINVAL;
 }
 
 /**
- * radeon_doorbell_free - Free a doorbell page
+ * radeon_doorbell_free - Free a doorbell entry
  *
  * @rdev: radeon_device pointer
- * @doorbell: doorbell page number
+ * @doorbell: doorbell index
  *
- * Free a doorbell page allocated for use by the driver (all asics)
+ * Free a doorbell allocated for use by the driver (all asics)
  */
 void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
 {
-	if (doorbell < rdev->doorbell.num_pages)
-		rdev->doorbell.free[doorbell] = true;
+	if (doorbell < rdev->doorbell.num_doorbells)
+		__clear_bit(doorbell, rdev->doorbell.used);
 }
...
@@ -76,9 +76,10 @@
  * 2.32.0 - new info request for rings working
  * 2.33.0 - Add SI tiling mode array query
  * 2.34.0 - Add CIK tiling mode array query
+ * 2.35.0 - Add CIK macrotile mode array query
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	34
+#define KMS_DRIVER_MINOR	35
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
...
@@ -471,6 +471,36 @@ int radeon_fence_wait_any(struct radeon_device *rdev,
 	return 0;
 }
 
+/**
+ * radeon_fence_wait_locked - wait for a fence to signal
+ *
+ * @fence: radeon fence object
+ *
+ * Wait for the requested fence to signal (all asics).
+ * Returns 0 if the fence has passed, error for all other cases.
+ */
+int radeon_fence_wait_locked(struct radeon_fence *fence)
+{
+	uint64_t seq[RADEON_NUM_RINGS] = {};
+	int r;
+
+	if (fence == NULL) {
+		WARN(1, "Querying an invalid fence : %p !\n", fence);
+		return -EINVAL;
+	}
+
+	seq[fence->ring] = fence->seq;
+	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
+		return 0;
+
+	r = radeon_fence_wait_seq(fence->rdev, seq, false, false);
+	if (r)
+		return r;
+
+	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
+	return 0;
+}
+
 /**
  * radeon_fence_wait_next_locked - wait for the next fence to signal
  *
...
@@ -651,7 +651,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
 				0, pd_entries, 0, 0);
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
@@ -1209,6 +1209,8 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		return -ENOMEM;
 
 	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
+	if (r)
+		return r;
 	ib.length_dw = 0;
 
 	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
@@ -1220,7 +1222,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
 			      addr, radeon_vm_page_flags(bo_va->flags));
 
-	radeon_ib_sync_to(&ib, vm->fence);
+	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
 		radeon_ib_free(rdev, &ib);
...
@@ -340,7 +340,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		break;
 	case RADEON_INFO_BACKEND_MAP:
 		if (rdev->family >= CHIP_BONAIRE)
-			return -EINVAL;
+			*value = rdev->config.cik.backend_map;
 		else if (rdev->family >= CHIP_TAHITI)
 			*value = rdev->config.si.backend_map;
 		else if (rdev->family >= CHIP_CAYMAN)
@@ -449,6 +449,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			return -EINVAL;
 		}
 		break;
+	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
+		if (rdev->family >= CHIP_BONAIRE) {
+			value = rdev->config.cik.macrotile_mode_array;
+			value_size = sizeof(uint32_t)*16;
+		} else {
+			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
+			return -EINVAL;
+		}
+		break;
 	case RADEON_INFO_SI_CP_DMA_COMPUTE:
 		*value = 1;
 		break;
...
@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 	/* Pin framebuffer & get tilling informations */
 	obj = radeon_fb->obj;
 	rbo = gem_to_radeon_bo(obj);
+retry:
 	r = radeon_bo_reserve(rbo, false);
 	if (unlikely(r != 0))
 		return r;
@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
 					 &base);
 	if (unlikely(r != 0)) {
 		radeon_bo_unreserve(rbo);
+
+		/* On old GPU like RN50 with little vram pining can fails because
+		 * current fb is taking all space needed. So instead of unpining
+		 * the old buffer after pining the new one, first unpin old one
+		 * and then retry pining new one.
+		 *
+		 * As only master can set mode only master can pin and it is
+		 * unlikely the master client will race with itself especialy
+		 * on those old gpu with single crtc.
+		 *
+		 * We don't shutdown the display controller because new buffer
+		 * will end up in same spot.
+		 */
+		if (!atomic && fb && fb != crtc->fb) {
+			struct radeon_bo *old_rbo;
+			unsigned long nsize, osize;
+
+			old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
+			osize = radeon_bo_size(old_rbo);
+			nsize = radeon_bo_size(rbo);
+			if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
+				radeon_bo_unpin(old_rbo);
+				radeon_bo_unreserve(old_rbo);
+				fb = NULL;
+				goto retry;
+			}
+		}
 		return -EINVAL;
 	}
 	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
...
@@ -1252,7 +1252,6 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_RS780:
 	case CHIP_RS880:
 	case CHIP_CAYMAN:
-	case CHIP_ARUBA:
 	case CHIP_BONAIRE:
 	case CHIP_KABINI:
 	case CHIP_KAVERI:
@@ -1284,6 +1283,7 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_BARTS:
 	case CHIP_TURKS:
 	case CHIP_CAICOS:
+	case CHIP_ARUBA:
 	case CHIP_TAHITI:
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
...
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		  struct radeon_ib *ib, struct radeon_vm *vm,
 		  unsigned size)
 {
-	int i, r;
+	int r;
 
 	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
 	if (r) {
@@ -87,8 +87,6 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
 	}
 	ib->is_const_ib = false;
-	for (i = 0; i < RADEON_NUM_RINGS; ++i)
-		ib->sync_to[i] = NULL;
 
 	return 0;
 }
@@ -108,25 +106,6 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_fence_unref(&ib->fence);
 }
 
-/**
- * radeon_ib_sync_to - sync to fence before executing the IB
- *
- * @ib: IB object to add fence to
- * @fence: fence to sync to
- *
- * Sync to the fence before executing the IB
- */
-void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
-{
-	struct radeon_fence *other;
-
-	if (!fence)
-		return;
-
-	other = ib->sync_to[fence->ring];
-	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
 /**
  * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
  *
@@ -151,8 +130,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		       struct radeon_ib *const_ib)
 {
 	struct radeon_ring *ring = &rdev->ring[ib->ring];
-	bool need_sync = false;
-	int i, r = 0;
+	int r = 0;
 
 	if (!ib->length_dw || !ring->ready) {
 		/* TODO: Nothings in the ib we should report. */
@@ -166,19 +144,15 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
 		return r;
 	}
-	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
-		struct radeon_fence *fence = ib->sync_to[i];
-		if (radeon_fence_need_sync(fence, ib->ring)) {
-			need_sync = true;
-			radeon_semaphore_sync_rings(rdev, ib->semaphore,
-						    fence->ring, ib->ring);
-			radeon_fence_note_sync(fence, ib->ring);
-		}
-	}
-	/* immediately free semaphore when we don't need to sync */
-	if (!need_sync) {
-		radeon_semaphore_free(rdev, &ib->semaphore, NULL);
+
+	/* sync with other rings */
+	r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
+	if (r) {
+		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
 	}
+
 	/* if we can't remember our last VM flush then flush now! */
 	/* XXX figure out why we have to flush for every IB */
 	if (ib->vm /*&& !ib->vm->last_flush*/) {
...
@@ -29,12 +29,12 @@
  */
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_trace.h"
 
 int radeon_semaphore_create(struct radeon_device *rdev,
 			    struct radeon_semaphore **semaphore)
 {
-	int r;
+	int i, r;
 
 	*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
 	if (*semaphore == NULL) {
@@ -50,54 +50,121 @@ int radeon_semaphore_create(struct radeon_device *rdev,
 	(*semaphore)->waiters = 0;
 	(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
 	*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+		(*semaphore)->sync_to[i] = NULL;
+
 	return 0;
 }
 
-void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx,
 				  struct radeon_semaphore *semaphore)
 {
-	--semaphore->waiters;
-	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_signale(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
+		--semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_signal_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
 }
 
-void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
 				struct radeon_semaphore *semaphore)
 {
-	++semaphore->waiters;
-	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+	struct radeon_ring *ring = &rdev->ring[ridx];
+
+	trace_radeon_semaphore_wait(ridx, semaphore);
+
+	if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
+		++semaphore->waiters;
+
+		/* for debugging lockup only, used by sysfs debug files */
+		ring->last_semaphore_wait_addr = semaphore->gpu_addr;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * radeon_semaphore_sync_to - use the semaphore to sync to a fence
+ *
+ * @semaphore: semaphore object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence using this semaphore object
+ */
+void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
+			      struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = semaphore->sync_to[fence->ring];
+	semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
 }
 
-/* caller must hold ring lock */
+/**
+ * radeon_semaphore_sync_rings - sync ring to all registered fences
+ *
+ * @rdev: radeon_device pointer
+ * @semaphore: semaphore object to use for sync
+ * @ring: ring that needs sync
+ *
+ * Ensure that all registered fences are signaled before letting
+ * the ring continue. The caller must hold the ring lock.
+ */
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
-				int signaler, int waiter)
+				int ring)
 {
-	int r;
+	int i, r;
 
-	/* no need to signal and wait on the same ring */
-	if (signaler == waiter) {
-		return 0;
-	}
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		struct radeon_fence *fence = semaphore->sync_to[i];
 
-	/* prevent GPU deadlocks */
-	if (!rdev->ring[signaler].ready) {
-		dev_err(rdev->dev, "Trying to sync to a disabled ring!");
-		return -EINVAL;
-	}
+		/* check if we really need to sync */
+		if (!radeon_fence_need_sync(fence, ring))
+			continue;
 
-	r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
-	if (r) {
-		return r;
-	}
-	radeon_semaphore_emit_signal(rdev, signaler, semaphore);
-	radeon_ring_commit(rdev, &rdev->ring[signaler]);
+		/* prevent GPU deadlocks */
+		if (!rdev->ring[i].ready) {
+			dev_err(rdev->dev, "Syncing to a disabled ring!");
+			return -EINVAL;
+		}
 
-	/* we assume caller has already allocated space on waiters ring */
-	radeon_semaphore_emit_wait(rdev, waiter, semaphore);
+		/* allocate enough space for sync command */
+		r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
+		if (r) {
+			return r;
+		}
 
-	/* for debugging lockup only, used by sysfs debug files */
-	rdev->ring[signaler].last_semaphore_signal_addr = semaphore->gpu_addr;
-	rdev->ring[waiter].last_semaphore_wait_addr = semaphore->gpu_addr;
+		/* emit the signal semaphore */
+		if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
+			/* signaling wasn't successful wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
+		/* we assume caller has already allocated space on waiters ring */
+		if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
+			/* waiting wasn't successful wait manually */
+			radeon_ring_undo(&rdev->ring[i]);
+			radeon_fence_wait_locked(fence);
+			continue;
+		}
+
+		radeon_ring_commit(rdev, &rdev->ring[i]);
+		radeon_fence_note_sync(fence, ring);
+	}
 
 	return 0;
 }
...
@@ -111,6 +111,42 @@ DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
 	    TP_ARGS(dev, seqno)
 );
 
+DECLARE_EVENT_CLASS(radeon_semaphore_request,
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+	    TP_ARGS(ring, sem),
+	    TP_STRUCT__entry(
+			     __field(int, ring)
+			     __field(signed, waiters)
+			     __field(uint64_t, gpu_addr)
+			     ),
+	    TP_fast_assign(
+			   __entry->ring = ring;
+			   __entry->waiters = sem->waiters;
+			   __entry->gpu_addr = sem->gpu_addr;
+			   ),
+	    TP_printk("ring=%u, waiters=%d, addr=%010Lx", __entry->ring,
+		      __entry->waiters, __entry->gpu_addr)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_signale,
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+	    TP_ARGS(ring, sem)
+);
+
+DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
+	    TP_PROTO(int ring, struct radeon_semaphore *sem),
+	    TP_ARGS(ring, sem)
+);
+
 #endif
 
 /* This part must be outside protection */
...
@@ -66,13 +66,8 @@ int rv770_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_dw = size_in_dw;
...
@@ -195,13 +195,8 @@ int si_copy_dma(struct radeon_device *rdev,
 		return r;
 	}
 
-	if (radeon_fence_need_sync(*fence, ring->idx)) {
-		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
-					    ring->idx);
-		radeon_fence_note_sync(*fence, ring->idx);
-	} else {
-		radeon_semaphore_free(rdev, &sem, NULL);
-	}
+	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
...
@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_device *rdev)
 	pi->enable_sclk_ds = true;
 	pi->enable_gfx_power_gating = true;
 	pi->enable_gfx_clock_gating = true;
-	pi->enable_mg_clock_gating = true;
-	pi->enable_gfx_dynamic_mgpg = true; /* ??? */
-	pi->override_dynamic_mgpg = true;
+	pi->enable_mg_clock_gating = false;
+	pi->enable_gfx_dynamic_mgpg = false;
+	pi->override_dynamic_mgpg = false;
 	pi->enable_auto_thermal_throttling = true;
 	pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
 	pi->uvd_dpm = true; /* ??? */
...
@@ -357,7 +357,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
@@ -372,6 +372,8 @@ void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+	return true;
 }
 
 /**
...
@@ -37,7 +37,7 @@
  *
  * Emit a semaphore command (either wait or signal) to the UVD ring.
  */
-void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 			     struct radeon_ring *ring,
 			     struct radeon_semaphore *semaphore,
 			     bool emit_wait)
@@ -52,4 +52,6 @@ void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
 	radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+
+	return true;
 }
@@ -981,6 +981,8 @@ struct drm_radeon_cs {
 #define RADEON_INFO_SI_TILE_MODE_ARRAY	0x16
 /* query if CP DMA is supported on the compute ring */
 #define RADEON_INFO_SI_CP_DMA_COMPUTE	0x17
+/* CIK macrotile mode array */
+#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY	0x18
 
 struct drm_radeon_info {
...