Commit 01385758 authored by Daniele Ceraolo Spurio, committed by Tvrtko Ursulin

drm/i915: kill uncore_to_i915

Let's get rid of it before it proliferates, since with split GT/Display
uncores the container_of won't work anymore.

I've kept the rpm pointer as well to minimize the pointer chasing in the
MMIO accessors.

v2: swap parameter order for intel_uncore_init_early (Tvrtko)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-4-daniele.ceraolospurio@intel.com
Parent 19e0a8d4
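The container_of() helper is only safe while drm_i915_private embeds exactly one intel_uncore. Below is a minimal standalone sketch of that limitation and of the cached back-pointer that replaces it; the names are illustrative stand-ins, not the driver's actual structures:

/* Illustrative only: stand-ins for drm_i915_private / intel_uncore. */
#include <stddef.h>

struct i915;

struct uncore {
        struct i915 *i915;      /* explicit back-pointer, set once at early init */
};

struct i915 {
        struct uncore gt_uncore;        /* with split GT/Display uncores there is  */
        struct uncore display_uncore;   /* no longer a single embedded instance    */
};

/* Old pattern: only correct for the one field named in offsetof(). */
#define uncore_to_i915_via_container_of(u) \
        ((struct i915 *)((char *)(u) - offsetof(struct i915, gt_uncore)))

/* New pattern: correct for any uncore, at the cost of one stored pointer. */
static inline struct i915 *uncore_to_i915_via_pointer(struct uncore *u)
{
        return u->i915;
}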
@@ -900,7 +900,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 	intel_device_info_subplatform_init(dev_priv);
 
-	intel_uncore_init_early(&dev_priv->uncore);
+	intel_uncore_init_early(&dev_priv->uncore, dev_priv);
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
@@ -1950,11 +1950,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 	return container_of(huc, struct drm_i915_private, huc);
 }
 
-static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
-{
-	return container_of(uncore, struct drm_i915_private, uncore);
-}
-
 /* Simple iterator over all initialised engines */
 #define for_each_engine(engine__, dev_priv__, id__) \
 	for ((id__) = 0; \
@@ -322,7 +322,7 @@ static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
 
 	/* On VLV, FIFO will be shared by both SW and HW.
 	 * So, we need to read the FREE_ENTRIES everytime */
-	if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+	if (IS_VALLEYVIEW(uncore->i915))
 		n = fifo_free_entries(uncore);
 	else
 		n = uncore->fifo_count;
@@ -493,7 +493,7 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+	if (IS_CHERRYVIEW(uncore->i915)) {
 		__raw_uncore_write32(uncore, GTFIFOCTL,
 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
@@ -622,7 +622,7 @@ void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
 	spin_lock_irq(&uncore->lock);
 	if (!--uncore->user_forcewake.count) {
 		if (intel_uncore_unclaimed_mmio(uncore))
-			dev_info(uncore_to_i915(uncore)->drm.dev,
+			dev_info(uncore->i915->drm.dev,
 				 "Invalid mmio detected during user access\n");
 
 		uncore->unclaimed_mmio_check =
@@ -1346,7 +1346,7 @@ static void fw_domain_fini(struct intel_uncore *uncore,
 
 static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
 
 	if (!intel_uncore_has_forcewake(uncore))
 		return;
@@ -1499,7 +1499,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 
 static int uncore_mmio_setup(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
 	struct pci_dev *pdev = i915->drm.pdev;
 	int mmio_bar;
 	int mmio_size;
@@ -1529,20 +1529,22 @@ static int uncore_mmio_setup(struct intel_uncore *uncore)
 
 static void uncore_mmio_cleanup(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
-	struct pci_dev *pdev = i915->drm.pdev;
+	struct pci_dev *pdev = uncore->i915->drm.pdev;
 
 	pci_iounmap(pdev, uncore->regs);
 }
 
-void intel_uncore_init_early(struct intel_uncore *uncore)
+void intel_uncore_init_early(struct intel_uncore *uncore,
+			     struct drm_i915_private *i915)
 {
 	spin_lock_init(&uncore->lock);
+	uncore->i915 = i915;
+	uncore->rpm = &i915->runtime_pm;
 }
 
 int intel_uncore_init_mmio(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
 	int ret;
 
 	ret = uncore_mmio_setup(uncore);
@@ -1561,8 +1563,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	uncore->pmic_bus_access_nb.notifier_call =
 		i915_pmic_bus_access_notifier;
 
-	uncore->rpm = &i915->runtime_pm;
-
 	if (!intel_uncore_has_forcewake(uncore)) {
 		if (IS_GEN(i915, 5)) {
 			ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
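The hunk above removes the rpm assignment from intel_uncore_init_mmio() because it now happens in intel_uncore_init_early(); the cached rpm field itself is kept so the MMIO accessors retain a direct pointer rather than chasing through the new i915 back-pointer. A standalone sketch of the two access paths, using illustrative stand-in types rather than the driver's own:

/* Stand-ins only; not the driver's structures or accessors. */
struct runtime_pm_s { int wakeref_count; };

struct i915_s {
        struct runtime_pm_s runtime_pm;
};

struct uncore_s {
        struct i915_s *i915;
        struct runtime_pm_s *rpm;       /* cached once, in early init */
};

static void uncore_init_early_sketch(struct uncore_s *uncore, struct i915_s *i915)
{
        uncore->i915 = i915;
        uncore->rpm = &i915->runtime_pm;        /* mirrors the diff above */
}

/* Accessors keep the direct pointer they already used ... */
static struct runtime_pm_s *rpm_direct(struct uncore_s *uncore)
{
        return uncore->rpm;
}

/* ... instead of being rewritten to hop through the back-pointer first. */
static struct runtime_pm_s *rpm_via_i915(struct uncore_s *uncore)
{
        return &uncore->i915->runtime_pm;
}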
@@ -1627,7 +1627,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
  */
 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+	struct drm_i915_private *i915 = uncore->i915;
 
 	if (INTEL_GEN(i915) >= 11) {
 		enum forcewake_domains fw_domains = uncore->fw_domains;
@@ -102,6 +102,7 @@ struct intel_forcewake_range {
 
 struct intel_uncore {
 	void __iomem *regs;
+	struct drm_i915_private *i915;
 	struct intel_runtime_pm *rpm;
 	spinlock_t lock; /** lock is also taken in irq contexts. */
@@ -182,7 +183,8 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
 	return uncore->flags & UNCORE_HAS_FIFO;
 }
 
-void intel_uncore_init_early(struct intel_uncore *uncore);
+void intel_uncore_init_early(struct intel_uncore *uncore,
+			     struct drm_i915_private *i915);
 int intel_uncore_init_mmio(struct intel_uncore *uncore);
 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);