Commit 8d2e6308 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: Needs_dmar, not

The reasoning behind our code taking two paths depending upon whether or
not we may have been configured for IOMMU isn't clear to me. It should
always be safe to use the pci mapping functions as they are designed to
abstract the decision we were handling in i915.

Aside from the simpler code, removing another member from the intel_gtt
struct is a nice motivation.

I ran this by Chris, and he wasn't concerned about the extra kzalloc,
or about the memory references vs. the page_to_phys calculation in the
case without an IOMMU.

v2: Update commit message

v3: Remove needs_dmar addition from Zhenyu upstream

This reverts (and then other stuff)
commit 20652097
Author: Zhenyu Wang <zhenyuw@linux.intel.com>
Date:   Thu Dec 13 23:47:47 2012 +0800

    drm/i915: Fix missed needs_dmar setting

Reviewed-by: Rodrigo Vivi <rodrigo.vivi@gmail.com> (v2)
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Squash in follow-up fix to remove the bogus hunk which
deleted the dma_mask configuration for gen6+.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 9c61a32d
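To illustrate the pattern the patch adopts, here is a minimal sketch (written for this page, not code from the patch; the helper name and signature are hypothetical): page-table pages are always mapped through the PCI DMA API, which hands back a usable bus address whether or not an IOMMU is translating, so the caller no longer needs a page_to_phys() fallback path.

/*
 * Hypothetical helper, not from the patch: map an array of pages
 * unconditionally via the DMA API. pci_map_page() abstracts the
 * IOMMU decision -- with DMAR enabled it returns an IOVA, without
 * it the bus address simply equals the page's physical address.
 */
static int map_pt_pages(struct pci_dev *pdev, struct page **pages,
			dma_addr_t *dma_addrs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		dma_addrs[i] = pci_map_page(pdev, pages[i], 0, PAGE_SIZE,
					    PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(pdev, dma_addrs[i]))
			return -EIO;
	}
	return 0;
}

The trade-off Chris accepted is visible in this sketch: the dma_addr_t array is always allocated and read back, whereas the old non-IOMMU path computed page_to_phys() on the fly.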
drivers/char/agp/intel-gtt.c
@@ -77,6 +77,8 @@ static struct _intel_private {
 	struct page *scratch_page;
 	phys_addr_t scratch_page_dma;
 	int refcount;
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
 } intel_private;
 
 #define INTEL_GTT_GEN	intel_private.driver->gen
@@ -292,7 +294,7 @@ static int intel_gtt_setup_scratch_page(void)
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -608,7 +610,7 @@ static int intel_gtt_init(void)
 
 	intel_private.base.stolen_size = intel_gtt_stolen_size();
 
-	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
+	intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -866,7 +868,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		struct sg_table st;
 
 		ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -907,7 +909,7 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 
 	intel_gtt_clear_range(pg_start, mem->page_count);
 
-	if (intel_private.base.needs_dmar) {
+	if (intel_private.needs_dmar) {
 		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
 		mem->sg_list = NULL;
 		mem->num_sg = 0;
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -138,28 +138,23 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 		goto err_pt_alloc;
 	}
 
-	if (dev_priv->mm.gtt->needs_dmar) {
-		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
-						*ppgtt->num_pd_entries,
-					     GFP_KERNEL);
-		if (!ppgtt->pt_dma_addr)
-			goto err_pt_alloc;
+	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+				     GFP_KERNEL);
+	if (!ppgtt->pt_dma_addr)
+		goto err_pt_alloc;
 
-		for (i = 0; i < ppgtt->num_pd_entries; i++) {
-			dma_addr_t pt_addr;
+	for (i = 0; i < ppgtt->num_pd_entries; i++) {
+		dma_addr_t pt_addr;
 
-			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
-					       0, 4096,
-					       PCI_DMA_BIDIRECTIONAL);
+		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
+				       PCI_DMA_BIDIRECTIONAL);
 
-			if (pci_dma_mapping_error(dev->pdev,
-						  pt_addr)) {
-				ret = -EIO;
-				goto err_pd_pin;
+		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
+			ret = -EIO;
+			goto err_pd_pin;
 
-			}
-			ppgtt->pt_dma_addr[i] = pt_addr;
 		}
+		ppgtt->pt_dma_addr[i] = pt_addr;
 	}
 
 	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
@@ -294,11 +289,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
 
-		if (dev_priv->mm.gtt->needs_dmar)
-			pt_addr = ppgtt->pt_dma_addr[i];
-		else
-			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
-
+		pt_addr = ppgtt->pt_dma_addr[i];
 		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
 		pd_entry |= GEN6_PDE_VALID;
@@ -730,16 +721,12 @@ int i915_gem_gtt_init(struct drm_device *dev)
 		return 0;
 	}
 
-	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
-	if (!dev_priv->mm.gtt)
-		return -ENOMEM;
-
 	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
 		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
 
-#ifdef CONFIG_INTEL_IOMMU
-	dev_priv->mm.gtt->needs_dmar = 1;
-#endif
-
+	dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
+	if (!dev_priv->mm.gtt)
+		return -ENOMEM;
+
 	/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
 	gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
include/drm/intel-gtt.h
@@ -11,8 +11,6 @@ struct intel_gtt {
 	/* Part of the gtt that is mappable by the cpu, for those chips where
 	 * this is not the full gtt. */
 	unsigned int gtt_mappable_entries;
-	/* Whether i915 needs to use the dmar apis or not. */
-	unsigned int needs_dmar : 1;
 	/* needed for ioremap in drm/i915 */
 	phys_addr_t gma_bus_addr;
 } *intel_gtt_get(void);