Commit 9e138ea1 authored by Daniele Ceraolo Spurio, committed by Tvrtko Ursulin

drm/i915/gvt: decouple check_vgpu() from uncore_init()

With multiple uncores to initialize (GT vs Display), it makes little
sense to keep the vgpu check inside uncore_init(). We also have a
catch-22: the uncore is required to read the vgpu capabilities, while
the vgpu capabilities are required to decide whether we need to
initialize forcewake support. To remove this circular dependency, we
can perform the required MMIO access by mapping just the vgtif shared
page in MMIO space and using raw accessors. A condensed sketch of this
access pattern is included after the patch metadata below.

v2: rename check_vgpu to detect_vgpu (Chris)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-7-daniele.ceraolospurio@intel.com
Parent f833cdb0
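
The access pattern the commit message describes reduces to: map only the one-page vgtif/PVINFO region of BAR 0 with pci_iomap_range() and read the struct vgt_if fields with plain readq()/readw()/readl(), so no uncore or forcewake state is involved. Below is a minimal sketch under those assumptions; the helper name is made up, while VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, VGT_MAGIC and vgtif_offset() come from i915_pvinfo.h (vgtif_offset() is introduced by this patch):

#include <linux/pci.h>
#include <linux/io.h>

#include "i915_pvinfo.h"	/* VGT_PVINFO_PAGE, VGT_PVINFO_SIZE, vgtif_offset() */

/*
 * Illustrative helper (not part of the patch): read the VGT magic straight
 * from the PVINFO page of BAR 0, without any uncore/forcewake setup.
 */
static bool vgt_magic_present(struct pci_dev *pdev)
{
	void __iomem *shared_area;
	u64 magic;

	/* Map only the shared PVINFO page, not the whole MMIO BAR. */
	shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
	if (!shared_area)
		return false;

	/* Raw accessor: no register abstraction, just an offset into the page. */
	magic = readq(shared_area + vgtif_offset(magic));

	pci_iounmap(pdev, shared_area);

	return magic == VGT_MAGIC;
}

The real implementation, i915_detect_vgpu() in the i915_vgpu.c hunk below, additionally checks the interface version and caches the capability bits in dev_priv->vgpu.
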
@@ -1899,6 +1899,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
+	i915_detect_vgpu(dev_priv);
+
 	ret = i915_driver_init_mmio(dev_priv);
 	if (ret < 0)
 		goto out_runtime_pm_put;
......
@@ -110,8 +110,9 @@ struct vgt_if {
 	u32 rsv7[0x200 - 24];	/* pad to one page */
 } __packed;
 
-#define vgtif_reg(x) \
-	_MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
+#define vgtif_offset(x) (offsetof(struct vgt_if, x))
+
+#define vgtif_reg(x) _MMIO(VGT_PVINFO_PAGE + vgtif_offset(x))
 
 /* vGPU display status to be used by the host side */
 #define VGT_DRV_DISPLAY_NOT_READY 0
......
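
The macro split above keeps a single struct vgt_if layout behind two access styles: vgtif_reg() still yields an i915_reg_t for the regular uncore read path, while vgtif_offset() is a bare byte offset that works against a raw iomapping of the PVINFO page. A rough sketch of the two styles; the helper names are made up, and intel_uncore_read64() is assumed to be the usual uncore accessor, usable only after uncore init:

/* Once the uncore is up: go through the register interface. */
static u64 pvinfo_magic_via_uncore(struct intel_uncore *uncore)
{
	return intel_uncore_read64(uncore, vgtif_reg(magic));
}

/* Before uncore init: plain offset arithmetic on an iomapped PVINFO page. */
static u64 pvinfo_magic_raw(void __iomem *shared_area)
{
	return readq(shared_area + vgtif_offset(magic));
}
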
@@ -52,34 +52,53 @@
  */
 
 /**
- * i915_check_vgpu - detect virtual GPU
+ * i915_detect_vgpu - detect virtual GPU
  * @dev_priv: i915 device private
  *
  * This function is called at the initialization stage, to detect whether
  * running on a vGPU.
  */
-void i915_check_vgpu(struct drm_i915_private *dev_priv)
+void i915_detect_vgpu(struct drm_i915_private *dev_priv)
 {
-	struct intel_uncore *uncore = &dev_priv->uncore;
+	struct pci_dev *pdev = dev_priv->drm.pdev;
 	u64 magic;
 	u16 version_major;
+	void __iomem *shared_area;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
-	magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
-	if (magic != VGT_MAGIC)
+	/*
+	 * This is called before we setup the main MMIO BAR mappings used via
+	 * the uncore structure, so we need to access the BAR directly. Since
+	 * we do not support VGT on older gens, return early so we don't have
+	 * to consider differently numbered or sized MMIO bars
+	 */
+	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
+	shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
+	if (!shared_area) {
+		DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+		return;
+	}
+
+	magic = readq(shared_area + vgtif_offset(magic));
+	if (magic != VGT_MAGIC)
+		goto out;
+
+	version_major = readw(shared_area + vgtif_offset(version_major));
 	if (version_major < VGT_VERSION_MAJOR) {
 		DRM_INFO("VGT interface version mismatch!\n");
-		return;
+		goto out;
 	}
 
-	dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
+	dev_priv->vgpu.caps = readl(shared_area + vgtif_offset(vgt_caps));
 
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+
+out:
+	pci_iounmap(pdev, shared_area);
 }
 
 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
......
@@ -26,7 +26,7 @@
 
 #include "i915_pvinfo.h"
 
-void i915_check_vgpu(struct drm_i915_private *dev_priv);
+void i915_detect_vgpu(struct drm_i915_private *dev_priv);
 bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
......
@@ -1650,8 +1650,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	if (ret)
 		return ret;
 
-	i915_check_vgpu(i915);
-
 	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
 
......