Commit 03f62abd authored by Christian König, committed by Alex Deucher

drm/radeon: split PT setup in more functions

Move the decision what to use into the common VM code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Parent 5a341be2
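This commit splits each ASIC's single vm.set_page() callback into three narrower callbacks (copy_pages, write_pages, set_pages) plus a pad_ib callback, and hoists the choice between them into the common VM code. A condensed sketch of the new dispatch policy, taken from the radeon_vm.c hunk below (the comments are ours, not the patch's):

```c
/* Condensed from the radeon_vm.c hunk in this commit; comments added here. */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* the PTEs already exist in the GART table: DMA-copy them */
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		/* scattered system pages, or runs too short to pay for a
		 * fixed-size PTE_PDE packet: write the entries directly */
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);
	} else {
		/* long physically contiguous runs (vram): have the DMA
		 * engine generate the entries itself */
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}
```

Note that the threshold also changed slightly: the old cayman code took the write path for count == 1, while the common helper uses count < 3.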
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -749,37 +749,28 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @src: src addr to copy from
  * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by copying them from the GART using sDMA (CIK).
  */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-			  struct radeon_ib *ib,
-			  uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags)
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
 	while (count) {
 		unsigned bytes = count * 8;
 		if (bytes > 0x1FFFF8)
 			bytes = 0x1FFFF8;
 
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 		ib->ptr[ib->length_dw++] = bytes;
 		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
 		ib->ptr[ib->length_dw++] = lower_32_bits(src);
@@ -791,27 +782,80 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 		src += bytes;
 		count -= bytes / 8;
 	}
-	} else if (flags & R600_PTE_SYSTEM) {
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
 			ndw = 0xFFFFE;
 
 		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
 		ib->ptr[ib->length_dw++] = pe;
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
 		ib->ptr[ib->length_dw++] = ndw;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
 				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
 			addr += incr;
 			value |= flags;
 			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
 		}
 	}
-	} else {
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
 	while (count) {
 		ndw = count;
 		if (ndw > 0x7FFFF)
@@ -821,6 +865,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 			value = addr;
 		else
 			value = 0;
+
 		/* for physically contiguous pages (vram) */
 		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
 		ib->ptr[ib->length_dw++] = pe; /* dst addr */
@@ -832,11 +877,21 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
 		ib->ptr[ib->length_dw++] = incr; /* increment size */
 		ib->ptr[ib->length_dw++] = 0;
 		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
 
 		pe += ndw * 8;
 		addr += ndw * incr;
 		count -= ndw;
 	}
 }
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
 	while (ib->length_dw & 0x7)
 		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0, 0);
 }
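A quick sanity check on the copy path's clamp above (our arithmetic, not part of the commit): each PTE is 8 bytes, so the 0x1FFFF8-byte limit works out to 262143 PTEs per SDMA copy packet, and the loop simply splits larger requests:

```c
/* Hypothetical helper, for illustration only: how many copy packets
 * cik_sdma_vm_copy_pages() emits for a given number of PTEs. */
static unsigned cik_sdma_copy_packets(unsigned count)
{
	const unsigned max_ptes = 0x1FFFF8 / 8;	/* 262143 PTEs per packet */

	return (count + max_ptes - 1) / max_ptes;	/* round up */
}
```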
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -307,31 +307,23 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @src: src addr where to copy from
  * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags)
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count)
 {
-	uint64_t value;
 	unsigned ndw;
 
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
@@ -348,15 +340,38 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 		src += ndw * 4;
 		count -= ndw / 2;
 	}
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
 
-	} else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
 			ndw = 0xFFFFE;
 
 		/* for non-physically contiguous pages (system) */
-		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+						      0, 0, ndw);
 		ib->ptr[ib->length_dw++] = pe;
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
@@ -374,7 +389,30 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
 		}
 	}
-	} else {
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
@@ -384,6 +422,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 			value = addr;
 		else
 			value = 0;
+
 		/* for physically contiguous pages (vram) */
 		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 		ib->ptr[ib->length_dw++] = pe; /* dst addr */
@@ -394,11 +433,21 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
 		ib->ptr[ib->length_dw++] = upper_32_bits(value);
 		ib->ptr[ib->length_dw++] = incr; /* increment size */
 		ib->ptr[ib->length_dw++] = 0;
 
 		pe += ndw * 4;
 		addr += (ndw / 2) * incr;
 		count -= ndw / 2;
 	}
 }
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
 	while (ib->length_dw & 0x7)
 		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
 }
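The write paths in this commit all construct each PTE the same way. The loop body below is as it appears in the cik_sdma.c and si_dma.c hunks (cayman's copy is collapsed in this view but presumably matches); the comments are added here:

```c
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
	if (flags & R600_PTE_SYSTEM) {
		/* system page: resolve through the GART, then keep only
		 * the 4K-aligned physical address bits */
		value = radeon_vm_map_gart(rdev, addr);
		value &= 0xFFFFFFFFFFFFF000ULL;
	} else if (flags & R600_PTE_VALID) {
		value = addr;	/* already a usable physical address */
	} else {
		value = 0;	/* entry marked invalid */
	}
	addr += incr;
	value |= flags;		/* access flags live in the low bits */
	ib->ptr[ib->length_dw++] = value;
	ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
```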
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1797,11 +1797,21 @@ struct radeon_asic {
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-		void (*set_page)(struct radeon_device *rdev,
+		void (*copy_pages)(struct radeon_device *rdev,
+				   struct radeon_ib *ib,
+				   uint64_t pe, uint64_t src,
+				   unsigned count);
+		void (*write_pages)(struct radeon_device *rdev,
+				    struct radeon_ib *ib,
+				    uint64_t pe,
+				    uint64_t addr, unsigned count,
+				    uint32_t incr, uint32_t flags);
+		void (*set_pages)(struct radeon_device *rdev,
 				  struct radeon_ib *ib,
 				  uint64_t pe,
 				  uint64_t addr, unsigned count,
 				  uint32_t incr, uint32_t flags);
+		void (*pad_ib)(struct radeon_ib *ib);
 	} vm;
 	/* ring specific callbacks */
 	struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2761,7 +2771,10 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
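Note that radeon_asic_vm_pad_ib still takes rdev even though the pad_ib callback itself only receives the IB; that keeps the macro family symmetric. A minimal, hypothetical call sequence mirroring the radeon_vm.c hunks below:

```c
/* Hypothetical caller, for illustration only; assume `ib` came from
 * radeon_ib_get() and pe/addr/count/incr/flags are already set up. */
ib.length_dw = 0;
radeon_asic_vm_write_pages(rdev, &ib, pe, addr, count, incr, flags);
radeon_asic_vm_pad_ib(rdev, &ib);	/* expands to rdev->asic->vm.pad_ib(&ib) */
```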
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1613,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.set_page = &cayman_dma_vm_set_page,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1713,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.set_page = &cayman_dma_vm_set_page,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1843,7 +1849,10 @@ static struct radeon_asic si_asic = {
 	.vm = {
 		.init = &si_vm_init,
 		.fini = &si_vm_fini,
-		.set_page = &si_dma_vm_set_page,
+		.copy_pages = &si_dma_vm_copy_pages,
+		.write_pages = &si_dma_vm_write_pages,
+		.set_pages = &si_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -2001,7 +2010,10 @@ static struct radeon_asic ci_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.set_page = &cik_sdma_vm_set_page,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2105,7 +2117,10 @@ static struct radeon_asic kv_asic = {
 	.vm = {
 		.init = &cik_vm_init,
 		.fini = &cik_vm_fini,
-		.set_page = &cik_sdma_vm_set_page,
+		.copy_pages = &cik_sdma_vm_copy_pages,
+		.write_pages = &cik_sdma_vm_write_pages,
+		.set_pages = &cik_sdma_vm_set_pages,
+		.pad_ib = &cik_sdma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -607,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
 				struct radeon_ib *ib);
 bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
 			     struct radeon_ib *ib,
 			     uint64_t pe,
 			     uint64_t addr, unsigned count,
 			     uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -694,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
 		uint64_t src_offset, uint64_t dst_offset,
 		unsigned num_gpu_pages,
 		struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
 			struct radeon_ib *ib,
 			uint64_t pe,
 			uint64_t addr, unsigned count,
 			uint32_t incr, uint32_t flags);
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 u32 si_get_xclk(struct radeon_device *rdev);
 uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -772,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
 int cik_vm_init(struct radeon_device *rdev);
 void cik_vm_fini(struct radeon_device *rdev);
 void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
 			  struct radeon_ib *ib,
 			  uint64_t pe,
 			  uint64_t addr, unsigned count,
 			  uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
 void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 u32 cik_gfx_get_rptr(struct radeon_device *rdev,
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -340,6 +340,42 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
 	return bo_va;
 }
 
+/**
+ * radeon_vm_set_pages - helper to call the right asic function
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void radeon_vm_set_pages(struct radeon_device *rdev,
+				struct radeon_ib *ib,
+				uint64_t pe,
+				uint64_t addr, unsigned count,
+				uint32_t incr, uint32_t flags)
+{
+	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
+
+	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
+		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
+		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);
+
+	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
+		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
+					   count, incr, flags);
+
+	} else {
+		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
+					 count, incr, flags);
+	}
+}
+
 /**
  * radeon_vm_clear_bo - initially clear the page dir/table
  *
@@ -381,7 +417,8 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	ib.length_dw = 0;
 
-	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
+	radeon_asic_vm_pad_ib(rdev, &ib);
 
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r)
@@ -634,7 +671,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				radeon_asic_vm_set_page(rdev, &ib, last_pde,
+				radeon_vm_set_pages(rdev, &ib, last_pde,
 						    last_pt, count, incr,
 						    R600_PTE_VALID);
 			}
@@ -648,10 +685,11 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 	}
 
 	if (count)
-		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
+		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
 				    incr, R600_PTE_VALID);
 
 	if (ib.length_dw != 0) {
+		radeon_asic_vm_pad_ib(rdev, &ib);
 		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		r = radeon_ib_schedule(rdev, &ib, NULL);
@@ -719,7 +757,7 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	    (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
 				    RADEON_GPU_PAGE_SIZE, flags);
 		return;
 	}
@@ -727,21 +765,21 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
+		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
 				    RADEON_GPU_PAGE_SIZE, flags);
 		addr += RADEON_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
+	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
 			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += RADEON_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
+		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
 				    RADEON_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -900,6 +938,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 			bo_va->it.last + 1, addr,
 			radeon_vm_page_flags(bo_va->flags));
 
+	radeon_asic_vm_pad_ib(rdev, &ib);
 	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
 	r = radeon_ib_schedule(rdev, &ib, NULL);
 	if (r) {
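With the split, NOP padding moves out of the per-ASIC update functions: the common code now pads each IB exactly once, right before scheduling it, instead of padding inside every set_page call. Our reading of the invariant, as a sketch (the 8-dword alignment comes from the pad_ib implementations above):

```c
/* Sketch, not part of the commit: after padding, the IB length is a
 * multiple of 8 dwords, which the DMA/sDMA engines appear to require
 * before the IB can be submitted. */
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw & 0x7);	/* length_dw is now 8-dword aligned */
r = radeon_ib_schedule(rdev, &ib, NULL);
```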
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -56,31 +56,21 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }
 
 /**
- * si_dma_vm_set_page - update the page tables using the DMA
+ * si_dma_vm_copy_pages - update PTEs by copying them from the GART
  *
  * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @src: src addr where to copy from
  * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
  *
- * Update the page tables using the DMA (SI).
+ * Update PTEs by copying them from the GART using the DMA (SI).
  */
-void si_dma_vm_set_page(struct radeon_device *rdev,
-			struct radeon_ib *ib,
-			uint64_t pe,
-			uint64_t addr, unsigned count,
-			uint32_t incr, uint32_t flags)
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+			  struct radeon_ib *ib,
+			  uint64_t pe, uint64_t src,
+			  unsigned count)
 {
-	uint64_t value;
-	unsigned ndw;
-
-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
 	while (count) {
 		unsigned bytes = count * 8;
 		if (bytes > 0xFFFF8)
@@ -97,7 +87,30 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
 		src += bytes;
 		count -= bytes / 8;
 	}
-	} else if (flags & R600_PTE_SYSTEM) {
+}
+
+/**
+ * si_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using the DMA (SI).
+ */
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
@@ -108,15 +121,44 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
 		ib->ptr[ib->length_dw++] = pe;
 		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
 		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
 				value = radeon_vm_map_gart(rdev, addr);
 				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
 			addr += incr;
 			value |= flags;
 			ib->ptr[ib->length_dw++] = value;
 			ib->ptr[ib->length_dw++] = upper_32_bits(value);
 		}
 	}
-	} else {
+}
+
+/**
+ * si_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+			 struct radeon_ib *ib,
+			 uint64_t pe,
+			 uint64_t addr, unsigned count,
+			 uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
 	while (count) {
 		ndw = count * 2;
 		if (ndw > 0xFFFFE)
@@ -126,6 +168,7 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
 			value = addr;
 		else
 			value = 0;
+
 		/* for physically contiguous pages (vram) */
 		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
 		ib->ptr[ib->length_dw++] = pe; /* dst addr */
@@ -140,9 +183,6 @@ void si_dma_vm_set_page(struct radeon_device *rdev,
 		addr += (ndw / 2) * incr;
 		count -= ndw / 2;
 	}
-	}
-
-	while (ib->length_dw & 0x7)
-		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
 }
 
 void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
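One consequence of the split shows in the write paths above: they now also handle R600_PTE_VALID and invalid entries, because the common helper routes very short contiguous runs (count < 3) to them as well. Rough dword-cost arithmetic, ours rather than the commit's, from the cayman/SI write-packet layout (one header dword plus two address dwords, then two dwords per PTE):

```c
/* Illustration only: for one or two PTEs the per-entry write packet is
 * no larger than the fixed-size DMA_PTE_PDE packet, so sending
 * count < 3 through write_pages avoids the generate path's setup cost
 * without ever producing a bigger packet. */
static unsigned dma_write_packet_dw(unsigned count)
{
	return 3 + 2 * count;	/* header, pe lo, pe hi, then value lo/hi per PTE */
}
```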