Commit 82568565 authored by Dave Airlie

drm/radeon/kms: set gart pages to invalid on unbind and point to dummy page

This uses a new entry point to invalidate GART entries instead of writing 0.
Rather than pointing empty entries at address 0, each empty entry now points
at a dummy page. This might help avoid a hard lockup if, for some wrong
reason, the GPU tries to access an unmapped GART entry.

I'm not 100% sure this is going to work; we probably need to allocate a dummy
page and point all the GTT entries at it, similar to what AGP does, but we
can test this first I suppose.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent e3439895
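
For readers skimming the diff below: the core idea is that an unbound GART
slot is no longer written as 0 but repointed at a pre-allocated dummy page.
A minimal sketch of that unbind pattern, condensed from the
radeon_gart_unbind() hunk further down (the helper name
gart_invalidate_slot is hypothetical, for illustration only):

/* Sketch of the unbind path this patch introduces; condensed from the
 * radeon_gart_unbind() hunk below, not a new driver function. */
static void gart_invalidate_slot(struct radeon_device *rdev,
				 unsigned p, unsigned t)
{
	u64 page_base;
	int j;

	/* Repoint the CPU-side bookkeeping at the dummy page... */
	rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
	page_base = rdev->gart.pages_addr[p];
	/* ...then make every GPU page entry covering this CPU page
	 * target the dummy page instead of address 0. */
	for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
		radeon_gart_set_page(rdev, t, page_base);
		page_base += RADEON_GPU_PAGE_SIZE;
	}
}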
...@@ -93,6 +93,7 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
 		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
......
...@@ -197,6 +197,7 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
+	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32(RADEON_AIC_CNTL, tmp);
......
...@@ -117,6 +117,7 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
......
...@@ -416,6 +416,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
......
...@@ -1145,6 +1145,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 /* AGP */
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
...@@ -1269,7 +1270,6 @@ extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
 extern int r600_cp_resume(struct radeon_device *rdev);
 extern void r600_cp_fini(struct radeon_device *rdev);
 extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
 extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
 extern int r600_pcie_gart_init(struct radeon_device *rdev);
 extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
......
...@@ -238,6 +238,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
 
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
+	if (rdev->dummy_page.page)
+		return 0;
 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
 	if (rdev->dummy_page.page == NULL)
 		return -ENOMEM;
......
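
The hunk above makes radeon_dummy_page_init() safe to call more than once, so
radeon_gart_init() (below) can invoke it unconditionally. For context, the
allocate-and-map pattern such an init follows looks roughly like this (a
simplified sketch with hypothetical naming, not the verbatim driver code):

/* Simplified sketch of dummy-page setup: allocate one zeroed page and
 * map it for bidirectional DMA so invalid GART entries have a harmless
 * target. Not the verbatim radeon code. */
int dummy_page_init_sketch(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)	/* already initialized: idempotent */
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}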
...@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	unsigned t;
 	unsigned p;
 	int i, j;
+	u64 page_base;
 
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to unbind memory to unitialized GART !\n");
...@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = 0;
+			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, 0);
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
...@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	return 0;
 }
 
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+	int i, j, t;
+	u64 page_base;
+
+	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+		page_base = rdev->gart.pages_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			radeon_gart_set_page(rdev, t, page_base);
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
 int radeon_gart_init(struct radeon_device *rdev)
 {
+	int r, i;
+
 	if (rdev->gart.pages) {
 		return 0;
 	}
...@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
 		DRM_ERROR("Page size is smaller than GPU page size!\n");
 		return -EINVAL;
 	}
+	r = radeon_dummy_page_init(rdev);
+	if (r)
+		return r;
 	/* Compute table size */
 	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
 	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
...@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+	}
 	return 0;
 }
......
...@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
 	uint32_t size_reg;
 	uint32_t tmp;
 
+	radeon_gart_restore(rdev);
 	tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
 	tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
......
...@@ -213,6 +213,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* Enable bus master */
 	tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
 	WREG32(R_00004C_BUS_CNTL, tmp);
......
...@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
+	radeon_gart_restore(rdev);
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
 		ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
......