Commit 2ece2e8b authored by Alex Deucher, committed by Dave Airlie

drm/radeon/kms: add IB and fence dispatch functions for SI

Support both IBs (DE) and CONST IBs (CE).
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 48c0c902
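For context, a minimal sketch (not part of this commit) of how a caller might tag an indirect buffer as a CE (constant engine) IB versus a regular DE (drawing engine) IB before handing it to the new dispatch hook. The helper name submit_si_ib() is hypothetical; only the is_const_ib field and si_ring_ib_execute() in the hunk below come from this patch, and ring locking/commit are omitted.

/* Hypothetical caller, for illustration only.  si_ring_ib_execute()
 * (added below) checks ib->is_const_ib and emits either
 * PACKET3_INDIRECT_BUFFER_CONST (CE) or PACKET3_INDIRECT_BUFFER (DE),
 * followed by a GART read-cache flush for the IB's vmid.
 */
static void submit_si_ib(struct radeon_device *rdev, struct radeon_ib *ib,
                         bool is_ce)
{
        ib->is_const_ib = is_ce;        /* true: CONST IB (CE), false: DE IB */
        si_ring_ib_execute(rdev, ib);   /* ring locking/commit omitted */
}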
@@ -1862,6 +1862,84 @@ static void si_gpu_init(struct radeon_device *rdev)
        udelay(50);
}

/*
 * GPU scratch registers helpers function.
 */
static void si_scratch_init(struct radeon_device *rdev)
{
        int i;

        rdev->scratch.num_reg = 7;
        rdev->scratch.reg_base = SCRATCH_REG0;
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
        }
}

void si_fence_ring_emit(struct radeon_device *rdev,
                        struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

        /* flush read cache over gart */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
                          PACKET3_TC_ACTION_ENA |
                          PACKET3_SH_KCACHE_ACTION_ENA |
                          PACKET3_SH_ICACHE_ACTION_ENA);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
        /* EVENT_WRITE_EOP - flush caches, send int */
        radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
        radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
        radeon_ring_write(ring, addr & 0xffffffff);
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
        radeon_ring_write(ring, fence->seq);
        radeon_ring_write(ring, 0);
}

/*
 * IB stuff
 */
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
        u32 header;

        if (ib->is_const_ib)
                header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
        else
                header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

        radeon_ring_write(ring, header);
        radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
                          (2 << 0) |
#endif
                          (ib->gpu_addr & 0xFFFFFFFC));
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
        radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

        /* flush read cache over gart for this vmid */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(ring, ib->vm_id);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
                          PACKET3_TC_ACTION_ENA |
                          PACKET3_SH_KCACHE_ACTION_ENA |
                          PACKET3_SH_ICACHE_ACTION_ENA);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
}

/*
 * CP.
 */
@@ -294,6 +294,8 @@
#define CP_PFP_HALT (1 << 26)
#define CP_ME_HALT (1 << 28)
#define CP_COHER_CNTL2 0x85E8
#define CP_RB2_RPTR 0x86f8
#define CP_RB1_RPTR 0x86fc
#define CP_RB0_RPTR 0x8700
@@ -511,6 +513,45 @@
#define CP_DEBUG 0xC1FC
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
# define CACHE_FLUSH_TS (4 << 0)
# define CACHE_FLUSH (6 << 0)
# define CS_PARTIAL_FLUSH (7 << 0)
# define VGT_STREAMOUT_RESET (10 << 0)
# define END_OF_PIPE_INCR_DE (11 << 0)
# define END_OF_PIPE_IB_END (12 << 0)
# define RST_PIX_CNT (13 << 0)
# define VS_PARTIAL_FLUSH (15 << 0)
# define PS_PARTIAL_FLUSH (16 << 0)
# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
# define ZPASS_DONE (21 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
# define PERFCOUNTER_START (23 << 0)
# define PERFCOUNTER_STOP (24 << 0)
# define PIPELINESTAT_START (25 << 0)
# define PIPELINESTAT_STOP (26 << 0)
# define PERFCOUNTER_SAMPLE (27 << 0)
# define SAMPLE_PIPELINESTAT (30 << 0)
# define SAMPLE_STREAMOUTSTATS (32 << 0)
# define RESET_VTX_CNT (33 << 0)
# define VGT_FLUSH (36 << 0)
# define BOTTOM_OF_PIPE_TS (40 << 0)
# define DB_CACHE_FLUSH_AND_INV (42 << 0)
# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
# define FLUSH_AND_INV_DB_META (44 << 0)
# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
# define FLUSH_AND_INV_CB_META (46 << 0)
# define CS_DONE (47 << 0)
# define PS_DONE (48 << 0)
# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
# define THREAD_TRACE_START (51 << 0)
# define THREAD_TRACE_STOP (52 << 0)
# define THREAD_TRACE_FLUSH (54 << 0)
# define THREAD_TRACE_FINISH (55 << 0)
/*
* PM4
*/
@@ -606,7 +647,31 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - EOP events
* 6 - EOS events
* 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
*/
#define INV_L2 (1 << 20)
/* INV TC L2 cache when EVENT_INDEX = 7 */
#define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
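As a worked example (again not part of the commit), the EVENT_WRITE_EOP fields defined above expand in si_fence_ring_emit() from the first hunk as follows; this small host-side snippet just recomputes the two control words.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5) */
        uint32_t event = (20u << 0) | (5u << 8);        /* 0x00000514 */
        /* DATA_SEL(1): write the 32-bit fence seq; INT_SEL(2): raise the
         * interrupt once the data write is confirmed (address bits omitted) */
        uint32_t sel = (1u << 29) | (2u << 24);         /* 0x22000000 */

        printf("EOP event word: 0x%08x\n", event);
        printf("EOP sel bits:   0x%08x\n", sel);
        return 0;
}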