Commit c5c7bc71 authored by Dave Airlie

Merge tag 'drm-intel-next-2017-09-29' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

2nd batch of v4.15 features:

- lib/scatterlist updates, use for userptr allocations (Tvrtko)
- Fixed point wrapper cleanup (Mahesh)
- Gen9+ transition watermarks, watermark optimization and fixes (Mahesh)
- Display IPC (Isochronous Priority Control) support (Mahesh)
- GEM workaround fixes (Oscar)
- GVT: PCI config sanitize series (Changbin)
- GVT: Workload submission error handling series (Fred)
- PSR fixes and refactoring (Rodrigo)
- HWSP based optimizations (Chris)
- Private PAT management (Zhi)
- IRQ handling fixes and refactoring (Ville)
- Module parameter refactoring and variable name clash fix (Michal)
- Execlist refactoring, incomplete request unwinding on reset (Chris)
- GuC scheduling improvements (Michal)
- OA updates (Lionel)
- Coffeelake out of alpha support (Rodrigo)
- seqno fixes (Chris)
- Execlist refactoring (Mika)
- DP and DP MST cleanups (Dhinakaran)
- Cannonlake slice/subslice config (Ben)
- Numerous fixes all around (Everyone)

* tag 'drm-intel-next-2017-09-29' of git://anongit.freedesktop.org/drm/drm-intel: (168 commits)
  drm/i915: Update DRIVER_DATE to 20170929
  drm/i915: Use memset64() to prefill the GTT page
  drm/i915: Also discard second CRC on gen8+ platforms.
  drm/i915/psr: Set frames before SU entry for psr2
  drm/dp: Add defines for latency in sink
  drm/i915: Allow optimized platform checks
  drm/i915: Avoid using dev_priv->info.gen directly.
  i915: Use %pS printk format for direct addresses
  drm/i915/execlists: Notify context-out for lost requests
  drm/i915/cnl: Add support slice/subslice/eu configs
  drm/i915: Compact device info access by a small re-ordering
  drm/i915: Add IS_PLATFORM macro
  drm/i915/selftests: Try to recover from a wedged GPU during reset tests
  drm/i915/huc: Reorganize HuC authentication
  drm/i915: Fix default values of some modparams
  drm/i915: Extend I915_PARAMS_FOR_EACH with default member value
  drm/i915: Make I915_PARAMS_FOR_EACH macro more flexible
  drm/i915: Enable scanline read based on frame timestamps
  drm/i915/execlists: Microoptimise execlists_cancel_port_request()
  drm/i915: Don't rmw PIPESTAT enable bits
  ...
......@@ -12,6 +12,7 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
select BACKLIGHT_LCD_SUPPORT if ACPI
......
......@@ -139,7 +139,8 @@ i915-y += i915_perf.o \
i915_oa_bxt.o \
i915_oa_kblgt2.o \
i915_oa_kblgt3.o \
i915_oa_glk.o
i915_oa_glk.o \
i915_oa_cflgt2.o
ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
......
......@@ -101,7 +101,7 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
......@@ -110,13 +110,25 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
static int map_aperture(struct intel_vgpu *vgpu, bool map)
{
u64 first_gfn, first_mfn;
phys_addr_t aperture_pa = vgpu_aperture_pa_base(vgpu);
unsigned long aperture_sz = vgpu_aperture_sz(vgpu);
u64 first_gfn;
u64 val;
int ret;
if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked)
return 0;
if (map) {
vgpu->gm.aperture_va = memremap(aperture_pa, aperture_sz,
MEMREMAP_WC);
if (!vgpu->gm.aperture_va)
return -ENOMEM;
} else {
memunmap(vgpu->gm.aperture_va);
vgpu->gm.aperture_va = NULL;
}
val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_2];
if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
val = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
......@@ -124,14 +136,16 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
val = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_2);
first_gfn = (val + vgpu_aperture_offset(vgpu)) >> PAGE_SHIFT;
first_mfn = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
first_mfn,
vgpu_aperture_sz(vgpu) >>
PAGE_SHIFT, map);
if (ret)
aperture_pa >> PAGE_SHIFT,
aperture_sz >> PAGE_SHIFT,
map);
if (ret) {
memunmap(vgpu->gm.aperture_va);
vgpu->gm.aperture_va = NULL;
return ret;
}
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map;
return 0;
......@@ -275,7 +289,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (WARN_ON(bytes > 4))
return -EINVAL;
if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
......
......@@ -1576,11 +1576,11 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
return 1;
}
static uint32_t find_bb_size(struct parser_exec_state *s)
static int find_bb_size(struct parser_exec_state *s)
{
unsigned long gma = 0;
struct cmd_info *info;
uint32_t bb_size = 0;
int bb_size = 0;
uint32_t cmd_len = 0;
bool met_bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
......@@ -1637,6 +1637,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
/* get the size of the batch buffer */
bb_size = find_bb_size(s);
if (bb_size < 0)
return -EINVAL;
/* allocate shadow batch buffer */
entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
......@@ -2603,7 +2605,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
u32 *cs;
void *shadow_ring_buffer_va;
int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
......@@ -2616,34 +2619,42 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs))
return PTR_ERR(cs);
if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
void *va = vgpu->reserve_ring_buffer_va[ring_id];
/* realloc the new ring buffer if needed */
vgpu->reserve_ring_buffer_va[ring_id] =
krealloc(va, workload->rb_len, GFP_KERNEL);
if (!vgpu->reserve_ring_buffer_va[ring_id]) {
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
return -ENOMEM;
}
vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
}
shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = cs;
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
gma_head, gma_top, cs);
gma_head, gma_top, shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
cs += ret / sizeof(u32);
shadow_ring_buffer_va += ret;
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
shadow_ring_buffer_va);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
return ret;
}
cs += ret / sizeof(u32);
intel_ring_advance(workload->req, cs);
return 0;
}
......
......@@ -368,7 +368,7 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;
......@@ -379,7 +379,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
return;
return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
......@@ -392,6 +392,7 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
return 0;
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
......@@ -420,7 +421,7 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned char *per_ctx_va =
......@@ -428,12 +429,12 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return;
return 0;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
0, CACHELINE_BYTES, 0);
if (IS_ERR(vma)) {
return;
return PTR_ERR(vma);
}
/* FIXME: we are not tracking our pinned VMA leaving it
......@@ -447,26 +448,7 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
}
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
intel_vgpu_pin_mm(workload->shadow_mm);
intel_vgpu_sync_oos_pages(workload->vgpu);
intel_vgpu_flush_post_shadow(workload->vgpu);
prepare_shadow_batch_buffer(workload);
prepare_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->emulate_schedule_in)
return 0;
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
return 0;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
......@@ -489,13 +471,62 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
}
}
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
if (!wa_ctx->indirect_ctx.obj)
return;
struct intel_vgpu *vgpu = workload->vgpu;
struct execlist_ctx_descriptor_format ctx[2];
int ring_id = workload->ring_id;
int ret;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
if (ret) {
gvt_vgpu_err("fail to vgpu pin mm\n");
goto out;
}
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
goto err_unpin_mm;
}
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
ret = intel_vgpu_flush_post_shadow(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to flush post shadow\n");
goto err_unpin_mm;
}
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
goto err_unpin_mm;
}
ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
goto err_shadow_batch;
}
if (!workload->emulate_schedule_in)
return 0;
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ret = emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
if (!ret)
goto out;
else
gvt_vgpu_err("fail to emulate execlist schedule in\n");
release_shadow_wa_ctx(&workload->wa_ctx);
err_shadow_batch:
release_shadow_batch_buffer(workload);
err_unpin_mm:
intel_vgpu_unpin_mm(workload->shadow_mm);
out:
return ret;
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
......@@ -511,8 +542,10 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->status) {
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
}
if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
/* if workload->status is not successful means HW GPU
......@@ -820,10 +853,21 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
struct intel_engine_cs *engine;
clean_workloads(vgpu, ALL_ENGINES);
kmem_cache_destroy(vgpu->workloads);
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
kfree(vgpu->reserve_ring_buffer_va[i]);
vgpu->reserve_ring_buffer_va[i] = NULL;
vgpu->reserve_ring_buffer_size[i] = 0;
}
}
#define RESERVE_RING_BUFFER_SIZE ((1 * PAGE_SIZE)/8)
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
{
enum intel_engine_id i;
......@@ -843,7 +887,26 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
if (!vgpu->workloads)
return -ENOMEM;
/* each ring has a shadow ring buffer until vgpu destroyed */
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
vgpu->reserve_ring_buffer_va[i] =
kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
if (!vgpu->reserve_ring_buffer_va[i]) {
gvt_vgpu_err("fail to alloc reserve ring buffer\n");
goto out;
}
vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
}
return 0;
out:
for_each_engine(engine, vgpu->gvt->dev_priv, i) {
if (vgpu->reserve_ring_buffer_size[i]) {
kfree(vgpu->reserve_ring_buffer_va[i]);
vgpu->reserve_ring_buffer_va[i] = NULL;
vgpu->reserve_ring_buffer_size[i] = 0;
}
}
return -ENOMEM;
}
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
......
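The error paths added in prepare_execlist_workload() above follow the kernel's goto-unwind idiom: each successfully acquired resource gets a matching release label, and a failure jumps to the label that undoes only what has already been set up. A minimal, self-contained sketch of that pattern, with purely illustrative stand-in functions rather than the driver's own helpers:

/*
 * Userspace sketch (not the driver code) of goto-based error unwinding:
 * acquire in order, release in reverse order on failure.
 */
#include <stdio.h>

static int pin_mm(void)         { return 0; }   /* stand-ins for the real steps */
static int prepare_batch(void)  { return 0; }
static int prepare_wa_ctx(void) { return -1; }  /* pretend this step fails */
static void release_batch(void) { puts("release batch"); }
static void unpin_mm(void)      { puts("unpin mm"); }

static int prepare_workload(void)
{
	int ret;

	ret = pin_mm();
	if (ret)
		goto out;            /* nothing to undo yet */
	ret = prepare_batch();
	if (ret)
		goto err_unpin_mm;
	ret = prepare_wa_ctx();
	if (ret)
		goto err_batch;
	return 0;

err_batch:
	release_batch();
err_unpin_mm:
	unpin_mm();
out:
	return ret;
}

int main(void)
{
	printf("prepare_workload() = %d\n", prepare_workload());
	return 0;
}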
......@@ -1647,14 +1647,13 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
return 0;
atomic_inc(&mm->pincount);
if (!mm->shadowed) {
ret = shadow_mm(mm);
if (ret)
return ret;
}
atomic_inc(&mm->pincount);
list_del_init(&mm->lru_list);
list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
return 0;
......@@ -1972,7 +1971,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
*/
se.val64 |= _PAGE_PRESENT | _PAGE_RW;
if (type == GTT_TYPE_PPGTT_PDE_PT)
se.val64 |= PPAT_CACHED_INDEX;
se.val64 |= PPAT_CACHED;
for (i = 0; i < page_entry_num; i++)
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
......
......@@ -111,7 +111,7 @@ static void init_device_info(struct intel_gvt *gvt)
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
|| IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
info->mmio_size = 2 * 1024 * 1024;
info->mmio_bar = 0;
info->gtt_start_offset = 8 * 1024 * 1024;
......
......@@ -80,6 +80,7 @@ struct intel_gvt_device_info {
struct intel_vgpu_gm {
u64 aperture_sz;
u64 hidden_sz;
void *aperture_va;
struct drm_mm_node low_gm_node;
struct drm_mm_node high_gm_node;
};
......@@ -99,7 +100,6 @@ struct intel_vgpu_mmio {
bool disable_warn_untrack;
};
#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4
struct intel_vgpu_pci_bar {
......@@ -108,7 +108,7 @@ struct intel_vgpu_pci_bar {
};
struct intel_vgpu_cfg_space {
unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};
......@@ -165,6 +165,9 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
/* 1/2K for each reserve ring buffer */
void *reserve_ring_buffer_va[I915_NUM_ENGINES];
int reserve_ring_buffer_size[I915_NUM_ENGINES];
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
......@@ -474,6 +477,13 @@ int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
/* We are 64bit bar. */
return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_MASK;
}
void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);
......
......@@ -609,21 +609,20 @@ static void intel_vgpu_release_work(struct work_struct *work)
__intel_vgpu_release(vgpu);
}
static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
u32 start_lo, start_hi;
u32 mem_type;
int pos = PCI_BASE_ADDRESS_0;
start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_MASK;
mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + pos)) &
mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
PCI_BASE_ADDRESS_MEM_TYPE_MASK;
switch (mem_type) {
case PCI_BASE_ADDRESS_MEM_TYPE_64:
start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
+ pos + 4));
+ bar + 4));
break;
case PCI_BASE_ADDRESS_MEM_TYPE_32:
case PCI_BASE_ADDRESS_MEM_TYPE_1M:
......@@ -637,6 +636,21 @@ static uint64_t intel_vgpu_get_bar0_addr(struct intel_vgpu *vgpu)
return ((u64)start_hi << 32) | start_lo;
}
static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
void *buf, unsigned int count, bool is_write)
{
uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
int ret;
if (is_write)
ret = intel_gvt_ops->emulate_mmio_write(vgpu,
bar_start + off, buf, count);
else
ret = intel_gvt_ops->emulate_mmio_read(vgpu,
bar_start + off, buf, count);
return ret;
}
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
......@@ -661,20 +675,14 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
buf, count);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
case VFIO_PCI_BAR1_REGION_INDEX:
if (is_write) {
uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
ret = intel_gvt_ops->emulate_mmio_write(vgpu,
bar0_start + pos, buf, count);
} else {
uint64_t bar0_start = intel_vgpu_get_bar0_addr(vgpu);
ret = intel_gvt_ops->emulate_mmio_read(vgpu,
bar0_start + pos, buf, count);
}
ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
buf, count, is_write);
break;
case VFIO_PCI_BAR2_REGION_INDEX:
ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_2, pos,
buf, count, is_write);
break;
case VFIO_PCI_BAR1_REGION_INDEX:
case VFIO_PCI_BAR3_REGION_INDEX:
case VFIO_PCI_BAR4_REGION_INDEX:
case VFIO_PCI_BAR5_REGION_INDEX:
......@@ -970,7 +978,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
switch (info.index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
info.size = INTEL_GVT_MAX_CFG_SPACE_SZ;
info.size = vgpu->gvt->device_info.cfg_space_size;
info.flags = VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE;
break;
......
......@@ -45,8 +45,7 @@
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
u64 gttmmio_gpa = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0) &
~GENMASK(3, 0);
u64 gttmmio_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
return gpa - gttmmio_gpa;
}
......@@ -57,6 +56,38 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
static bool vgpu_gpa_is_aperture(struct intel_vgpu *vgpu, uint64_t gpa)
{
u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
u64 aperture_sz = vgpu_aperture_sz(vgpu);
return gpa >= aperture_gpa && gpa < aperture_gpa + aperture_sz;
}
static int vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t gpa,
void *pdata, unsigned int size, bool is_read)
{
u64 aperture_gpa = intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_2);
u64 offset = gpa - aperture_gpa;
if (!vgpu_gpa_is_aperture(vgpu, gpa + size - 1)) {
gvt_vgpu_err("Aperture rw out of range, offset %llx, size %d\n",
offset, size);
return -EINVAL;
}
if (!vgpu->gm.aperture_va) {
gvt_vgpu_err("BAR is not enabled\n");
return -ENXIO;
}
if (is_read)
memcpy(pdata, vgpu->gm.aperture_va + offset, size);
else
memcpy(vgpu->gm.aperture_va + offset, pdata, size);
return 0;
}
static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
void *p_data, unsigned int bytes, bool read)
{
......@@ -133,6 +164,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
}
mutex_lock(&gvt->lock);
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
mutex_unlock(&gvt->lock);
return ret;
}
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
......@@ -224,6 +261,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mutex_lock(&gvt->lock);
if (vgpu_gpa_is_aperture(vgpu, pa)) {
ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
mutex_unlock(&gvt->lock);
return ret;
}
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
struct intel_vgpu_guest_page *gp;
......
......@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
*/
if (mmio->in_context &&
((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
i915.enable_execlists)
i915_modparams.enable_execlists)
continue;
if (mmio->mask)
......
......@@ -87,7 +87,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
return -EINVAL;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
dst = kmap(page);
intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
GTT_PAGE_SIZE);
......@@ -201,6 +201,43 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
void *shadow_ring_buffer_va;
u32 *cs;
/* allocate shadow ring buffer */
cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
if (IS_ERR(cs)) {
gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
workload->rb_len);
return PTR_ERR(cs);
}
shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = cs;
memcpy(cs, shadow_ring_buffer_va,
workload->rb_len);
cs += workload->rb_len / sizeof(u32);
intel_ring_advance(workload->req, cs);
return 0;
}
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (!wa_ctx->indirect_ctx.obj)
return;
i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, include ringbuffer,wa_ctx and ctx.
......@@ -214,8 +251,10 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
int ring_id = workload->ring_id;
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
......@@ -231,35 +270,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
shadow_context_descriptor_update(shadow_ctx,
dev_priv->engine[ring_id]);
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto out;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
workload->req = i915_gem_request_get(rq);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
goto out;
goto err_scan;
if ((workload->ring_id == RCS) &&
(workload->wa_ctx.indirect_ctx.size != 0)) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto out;
goto err_scan;
}
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
* moment, i915 may already unpined the shadow context to make the
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
ring = engine->context_pin(engine, shadow_ctx);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
goto err_shadow;
}
ret = populate_shadow_context(workload);
if (ret)
goto out;
goto err_unpin;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
ret = PTR_ERR(rq);
goto err_unpin;
}
gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
workload->req = i915_gem_request_get(rq);
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
workload->shadowed = true;
return 0;
out:
err_unpin:
engine->context_unpin(engine, shadow_ctx);
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
err_scan:
return ret;
}
......@@ -269,8 +329,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret = 0;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
......@@ -284,22 +342,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
if (workload->prepare) {
ret = workload->prepare(workload);
if (ret)
if (ret) {
engine->context_unpin(engine, shadow_ctx);
goto out;
}
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
* moment, i915 may already unpined the shadow context to make the
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
ring = engine->context_pin(engine, shadow_ctx);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
goto out;
}
}
out:
......@@ -408,7 +454,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
return;
}
page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
src = kmap(page);
intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
GTT_PAGE_SIZE);
......
......@@ -140,4 +140,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
......@@ -67,7 +67,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
#undef PRINT_FLAG
kernel_param_lock(THIS_MODULE);
#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
I915_PARAMS_FOR_EACH(PRINT_PARAM);
#undef PRINT_PARAM
kernel_param_unlock(THIS_MODULE);
......@@ -1267,7 +1267,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
seq_puts(m, "struct_mutex blocked for reset\n");
if (!i915.enable_hangcheck) {
if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
return 0;
}
......@@ -1422,6 +1422,9 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
i915->uncore.user_forcewake.count);
for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
......@@ -1699,7 +1702,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915.enable_ips));
yesno(i915_modparams.enable_ips));
if (INTEL_GEN(dev_priv) >= 8) {
seq_puts(m, "Currently: unknown\n");
......@@ -2014,7 +2017,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
enum intel_engine_id id;
int ret;
if (!i915.enable_execlists) {
if (!i915_modparams.enable_execlists) {
seq_printf(m, "Logical Ring Contexts are disabled\n");
return 0;
}
......@@ -2443,12 +2446,8 @@ static void i915_guc_client_info(struct seq_file *m,
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
client->doorbell_id, client->doorbell_offset);
for_each_engine(engine, dev_priv, id) {
u64 submissions = client->submissions[id];
......@@ -2594,7 +2593,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
if (!dev_priv->guc.log.vma)
return -EINVAL;
*val = i915.guc_log_level;
*val = i915_modparams.guc_log_level;
return 0;
}
......@@ -3312,7 +3311,9 @@ static int i915_engine_info(struct seq_file *m, void *unused)
seq_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (i915.enable_execlists) {
if (i915_modparams.enable_execlists) {
const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
struct intel_engine_execlists * const execlists = &engine->execlists;
u32 ptr, read, write;
unsigned int idx;
......@@ -3323,8 +3324,10 @@ static int i915_engine_info(struct seq_file *m, void *unused)
ptr = I915_READ(RING_CONTEXT_STATUS_PTR(engine));
read = GEN8_CSB_READ_PTR(ptr);
write = GEN8_CSB_WRITE_PTR(ptr);
seq_printf(m, "\tExeclist CSB read %d, write %d, interrupt posted? %s\n",
read, write,
seq_printf(m, "\tExeclist CSB read %d [%d cached], write %d [%d from hws], interrupt posted? %s\n",
read, execlists->csb_head,
write,
intel_read_status_page(engine, intel_hws_csb_write_index(engine->i915)),
yesno(test_bit(ENGINE_IRQ_EXECLIST,
&engine->irq_posted)));
if (read >= GEN8_CSB_ENTRIES)
......@@ -3335,18 +3338,19 @@ static int i915_engine_info(struct seq_file *m, void *unused)
write += GEN8_CSB_ENTRIES;
while (read < write) {
idx = ++read % GEN8_CSB_ENTRIES;
seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
seq_printf(m, "\tExeclist CSB[%d]: 0x%08x [0x%08x in hwsp], context: %d [%d in hwsp]\n",
idx,
I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
hws[idx * 2],
I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)),
hws[idx * 2 + 1]);
}
rcu_read_lock();
for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
unsigned int count;
rq = port_unpack(&engine->execlist_port[idx],
&count);
rq = port_unpack(&execlists->port[idx], &count);
if (rq) {
seq_printf(m, "\t\tELSP[%d] count=%d, ",
idx, count);
......@@ -3359,7 +3363,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
for (rb = execlists->first; rb; rb = rb_next(rb)) {
struct i915_priolist *p =
rb_entry(rb, typeof(*p), node);
......@@ -3403,7 +3407,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
enum intel_engine_id id;
int j, ret;
if (!i915.semaphores) {
if (!i915_modparams.semaphores) {
seq_puts(m, "Semaphores are disabled\n");
return 0;
}
......@@ -3523,6 +3527,57 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
static int i915_ipc_status_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
seq_printf(m, "Isochronous Priority Control: %s\n",
yesno(dev_priv->ipc_enabled));
return 0;
}
static int i915_ipc_status_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
if (!HAS_IPC(dev_priv))
return -ENODEV;
return single_open(file, i915_ipc_status_show, dev_priv);
}
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
int ret;
bool enable;
ret = kstrtobool_from_user(ubuf, len, &enable);
if (ret < 0)
return ret;
intel_runtime_pm_get(dev_priv);
if (!dev_priv->ipc_enabled && enable)
DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
dev_priv->wm.distrust_bios_wm = true;
dev_priv->ipc_enabled = enable;
intel_enable_ipc(dev_priv);
intel_runtime_pm_put(dev_priv);
return len;
}
static const struct file_operations i915_ipc_status_fops = {
.owner = THIS_MODULE,
.open = i915_ipc_status_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = i915_ipc_status_write
};
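As a usage note (assuming the standard DRM debugfs layout), the entry registered below as "i915_ipc_status" can then be toggled from userspace by writing a boolean value to <debugfs>/dri/<minor>/i915_ipc_status, typically /sys/kernel/debug/dri/0/i915_ipc_status, and the current IPC state can be read back from the same file.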
static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
......@@ -4674,26 +4729,26 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
struct drm_i915_private *i915 = inode->i_private;
if (INTEL_GEN(dev_priv) < 6)
if (INTEL_GEN(i915) < 6)
return 0;
intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_get(i915);
intel_uncore_forcewake_user_get(i915);
return 0;
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *dev_priv = inode->i_private;
struct drm_i915_private *i915 = inode->i_private;
if (INTEL_GEN(dev_priv) < 6)
if (INTEL_GEN(i915) < 6)
return 0;
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv);
intel_uncore_forcewake_user_put(i915);
intel_runtime_pm_put(i915);
return 0;
}
......@@ -4859,7 +4914,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_control", &i915_guc_log_control_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
......
......@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;
bool __i915_inject_load_failure(const char *func, int line)
{
if (i915_load_fail_count >= i915.inject_load_failure)
if (i915_load_fail_count >= i915_modparams.inject_load_failure)
return false;
if (++i915_load_fail_count == i915.inject_load_failure) {
if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
i915.inject_load_failure, func, line);
i915_modparams.inject_load_failure, func, line);
return true;
}
......@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
return i915.inject_load_failure &&
i915_load_fail_count == i915.inject_load_failure;
return i915_modparams.inject_load_failure &&
i915_load_fail_count == i915_modparams.inject_load_failure;
}
#define i915_load_error(dev_priv, fmt, ...) \
......@@ -321,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = USES_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = i915.semaphores;
value = i915_modparams.semaphores;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
......@@ -340,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
return -ENODEV;
break;
case I915_PARAM_HAS_GPU_RESET:
value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
value = i915_modparams.enable_hangcheck &&
intel_has_gpu_reset(dev_priv);
if (value && intel_has_reset_engine(dev_priv))
value = 2;
break;
......@@ -869,6 +870,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
memcpy(device_info, match_info, sizeof(*device_info));
device_info->device_id = dev_priv->drm.pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
sizeof(device_info->platform_mask) * BITS_PER_BYTE);
device_info->platform_mask = BIT(device_info->platform);
BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
device_info->gen_mask = BIT(device_info->gen - 1);
......@@ -1031,9 +1036,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
i915.enable_execlists =
i915_modparams.enable_execlists =
intel_sanitize_enable_execlists(dev_priv,
i915.enable_execlists);
i915_modparams.enable_execlists);
/*
* i915.enable_ppgtt is read-only, so do an early pass to validate the
......@@ -1041,12 +1046,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
* do this now so that we can print out any log messages once rather
* than every time we check intel_enable_ppgtt().
*/
i915.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
i915_modparams.enable_ppgtt =
intel_sanitize_enable_ppgtt(dev_priv,
i915_modparams.enable_ppgtt);
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
i915_modparams.semaphores =
intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
yesno(i915_modparams.semaphores));
intel_uc_sanitize_options(dev_priv);
......@@ -1277,7 +1285,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
/* Enable nuclear pageflip on ILK+ */
if (!i915.nuclear_pageflip && match_info->gen < 5)
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
......@@ -1341,7 +1349,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
dev_priv->ipc_enabled = false;
intel_init_ipc(dev_priv);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
DRM_INFO("DRM_I915_DEBUG enabled\n");
......@@ -2609,6 +2617,8 @@ static int intel_runtime_resume(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
intel_enable_ipc(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
......
......@@ -80,8 +80,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20170907"
#define DRIVER_TIMESTAMP 1504772900
#define DRIVER_DATE "20170929"
#define DRIVER_TIMESTAMP 1506682238
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
......@@ -93,7 +93,7 @@
#define I915_STATE_WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
if (!WARN(i915.verbose_state_checks, format)) \
if (!WARN(i915_modparams.verbose_state_checks, format)) \
DRM_ERROR(format); \
unlikely(__ret_warn_on); \
})
......@@ -126,7 +126,7 @@ static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
{
uint_fixed_16_16_t fp;
WARN_ON(val >> 16);
WARN_ON(val > U16_MAX);
fp.val = val << 16;
return fp;
......@@ -163,8 +163,8 @@ static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
{
uint_fixed_16_16_t fp;
WARN_ON(val >> 32);
fp.val = clamp_t(uint32_t, val, 0, ~0);
WARN_ON(val > U32_MAX);
fp.val = (uint32_t) val;
return fp;
}
......@@ -181,8 +181,8 @@ static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
intermediate_val = (uint64_t) val * mul.val;
intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
WARN_ON(intermediate_val >> 32);
return clamp_t(uint32_t, intermediate_val, 0, ~0);
WARN_ON(intermediate_val > U32_MAX);
return (uint32_t) intermediate_val;
}
static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
......@@ -211,8 +211,8 @@ static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
interm_val = (uint64_t)val << 16;
interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
WARN_ON(interm_val >> 32);
return clamp_t(uint32_t, interm_val, 0, ~0);
WARN_ON(interm_val > U32_MAX);
return (uint32_t) interm_val;
}
static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
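The uint_fixed_16_16_t hunks above swap the open-coded overflow checks (val >> 16, val >> 32) for the clearer val > U16_MAX / val > U32_MAX forms and drop the redundant clamps. A minimal sketch of the 16.16 layout these helpers assume — a simplified userspace re-implementation for illustration, not the driver's definitions:

/*
 * 16.16 fixed point: integer part in the upper 16 bits,
 * fractional part in the lower 16 bits of a u32.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t val; } fixed16;

static fixed16 u32_to_fixed16(uint32_t v)
{
	/* Only values up to U16_MAX fit: the integer part occupies the
	 * upper 16 bits, so v > 0xffff would overflow — the same
	 * condition the driver now WARNs on. */
	fixed16 fp = { .val = v << 16 };
	return fp;
}

static uint32_t fixed16_to_u32_round_up(fixed16 fp)
{
	return (fp.val + 0xffff) >> 16;
}

int main(void)
{
	fixed16 three_and_half = { .val = (3 << 16) | 0x8000 }; /* 3.5 */

	printf("3.5 rounded up -> %u\n", fixed16_to_u32_round_up(three_and_half)); /* 4 */
	printf("7 as fixed16   -> 0x%08x\n", u32_to_fixed16(7).val);               /* 0x00070000 */
	return 0;
}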
......@@ -776,7 +776,6 @@ struct intel_csr {
func(has_fpga_dbg); \
func(has_full_ppgtt); \
func(has_full_48bit_ppgtt); \
func(has_gmbus_irq); \
func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
......@@ -797,7 +796,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
func(supports_tv);
func(supports_tv); \
func(has_ipc);
struct sseu_dev_info {
u8 slice_mask;
......@@ -851,21 +851,28 @@ enum intel_platform {
};
struct intel_device_info {
u32 display_mmio_offset;
u16 device_id;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
u8 gen;
u16 gen_mask;
enum intel_platform platform;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
u8 ring_mask; /* Rings supported by the HW */
u8 num_rings;
u8 ring_mask; /* Rings supported by the HW */
enum intel_platform platform;
u32 platform_mask;
u32 display_mmio_offset;
u8 num_pipes;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
......@@ -1000,7 +1007,8 @@ struct i915_gpu_state {
u32 seqno;
u32 head;
u32 tail;
} *requests, execlist[2];
} *requests, execlist[EXECLIST_MAX_PORTS];
unsigned int num_ports;
struct drm_i915_error_waiter {
char comm[TASK_COMM_LEN];
......@@ -1178,6 +1186,14 @@ struct i915_psr {
bool y_cord_support;
bool colorimetry_support;
bool alpm;
void (*enable_source)(struct intel_dp *,
const struct intel_crtc_state *);
void (*disable_source)(struct intel_dp *,
const struct intel_crtc_state *);
void (*enable_sink)(struct intel_dp *);
void (*activate)(struct intel_dp *);
void (*setup_vsc)(struct intel_dp *, const struct intel_crtc_state *);
};
enum intel_pch {
......@@ -1836,6 +1852,20 @@ struct skl_wm_level {
uint8_t plane_res_l;
};
/* Stores plane specific WM parameters */
struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
uint32_t width;
uint8_t cpp;
uint32_t plane_pixel_rate;
uint32_t y_min_scanlines;
uint32_t plane_bytes_per_line;
uint_fixed_16_16_t plane_blocks_per_line;
uint_fixed_16_16_t y_tile_minimum;
uint32_t linetime_us;
};
/*
* This struct helps tracking the state needed for runtime PM, which puts the
* device in PCI D3 state. Notice that when this happens, nothing on the
......@@ -2331,6 +2361,8 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
struct intel_ppat ppat;
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
......@@ -2811,8 +2843,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
#define for_each_sgt_dma(__dmap, __iter, __sgt) \
for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
((__dmap) = (__iter).dma + (__iter).curr); \
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
/**
* for_each_sgt_page - iterate over the pages of the given sg_table
......@@ -2824,8 +2856,23 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
((__pp) = (__iter).pfn == 0 ? NULL : \
pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
(((__iter).curr += PAGE_SIZE) < (__iter).max) || \
((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
static inline unsigned int i915_sg_segment_size(void)
{
unsigned int size = swiotlb_max_segment();
if (size == 0)
return SCATTERLIST_MAX_SEGMENT;
size = rounddown(size, PAGE_SIZE);
/* swiotlb_max_segment_size can return 1 byte when it means one page. */
if (size < PAGE_SIZE)
size = PAGE_SIZE;
return size;
}
static inline const struct intel_device_info *
intel_info(const struct drm_i915_private *dev_priv)
......@@ -2842,23 +2889,21 @@ intel_info(const struct drm_i915_private *dev_priv)
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
#define GEN_FOREVER (0)
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
(s) != GEN_FOREVER ? (s) - 1 : 0) \
)
/*
* Returns true if Gen is in inclusive range [Start, End].
*
* Use GEN_FOREVER for unbound start and or end.
*/
#define IS_GEN(dev_priv, s, e) ({ \
unsigned int __s = (s), __e = (e); \
BUILD_BUG_ON(!__builtin_constant_p(s)); \
BUILD_BUG_ON(!__builtin_constant_p(e)); \
if ((__s) != GEN_FOREVER) \
__s = (s) - 1; \
if ((__e) == GEN_FOREVER) \
__e = BITS_PER_LONG - 1; \
else \
__e = (e) - 1; \
!!((dev_priv)->info.gen_mask & GENMASK((__e), (__s))); \
})
#define IS_GEN(dev_priv, s, e) \
(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
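The rework above folds the old runtime branches into a single constant mask: INTEL_GEN_MASK(s, e) builds a GENMASK covering gens s..e (with GEN_FOREVER opening either end), and IS_GEN() simply tests it against info.gen_mask, which stores BIT(gen - 1). A worked, compile-and-run example of that check, with GENMASK/BIT re-declared for a plain userspace build (illustrative only):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(long))
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* IS_GEN(dev_priv, 9, GEN_FOREVER) expands to testing gen_mask
	 * against GENMASK(BITS_PER_LONG - 1, 9 - 1). */
	unsigned long gen9_mask  = BIT(9 - 1);                          /* a gen9 device */
	unsigned long range_mask = GENMASK(BITS_PER_LONG - 1, 9 - 1);   /* [9, forever] */

	printf("gen9 in [9, forever]: %d\n", !!(gen9_mask & range_mask));  /* 1 */
	printf("gen8 in [9, forever]: %d\n", !!(BIT(8 - 1) & range_mask)); /* 0 */
	return 0;
}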
/*
* Return true if revision is in range [since,until] inclusive.
......@@ -2868,37 +2913,39 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
#define IS_I830(dev_priv) ((dev_priv)->info.platform == INTEL_I830)
#define IS_I845G(dev_priv) ((dev_priv)->info.platform == INTEL_I845G)
#define IS_I85X(dev_priv) ((dev_priv)->info.platform == INTEL_I85X)
#define IS_I865G(dev_priv) ((dev_priv)->info.platform == INTEL_I865G)
#define IS_I915G(dev_priv) ((dev_priv)->info.platform == INTEL_I915G)
#define IS_I915GM(dev_priv) ((dev_priv)->info.platform == INTEL_I915GM)
#define IS_I945G(dev_priv) ((dev_priv)->info.platform == INTEL_I945G)
#define IS_I945GM(dev_priv) ((dev_priv)->info.platform == INTEL_I945GM)
#define IS_I965G(dev_priv) ((dev_priv)->info.platform == INTEL_I965G)
#define IS_I965GM(dev_priv) ((dev_priv)->info.platform == INTEL_I965GM)
#define IS_G45(dev_priv) ((dev_priv)->info.platform == INTEL_G45)
#define IS_GM45(dev_priv) ((dev_priv)->info.platform == INTEL_GM45)
#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv) IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv) IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_PINEVIEW)
#define IS_G33(dev_priv) ((dev_priv)->info.platform == INTEL_G33)
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) ((dev_priv)->info.platform == INTEL_IVYBRIDGE)
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
(dev_priv)->info.gt == 1)
#define IS_VALLEYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) ((dev_priv)->info.platform == INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) ((dev_priv)->info.platform == INTEL_HASWELL)
#define IS_BROADWELL(dev_priv) ((dev_priv)->info.platform == INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv) ((dev_priv)->info.platform == INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) ((dev_priv)->info.platform == INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) ((dev_priv)->info.platform == INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) ((dev_priv)->info.platform == INTEL_CANNONLAKE)
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv) IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
......@@ -2946,6 +2993,8 @@ intel_info(const struct drm_i915_private *dev_priv)
(dev_priv)->info.gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
......@@ -3036,9 +3085,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
((dev_priv)->info.has_logical_ring_contexts)
#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
......@@ -3056,9 +3105,12 @@ intel_info(const struct drm_i915_private *dev_priv)
* even when in MSI mode. This results in spurious interrupt warnings if the
* legacy irq no. is shared with another device. The kernel then disables that
* interrupt source and so prevents the other device from working properly.
*
* Since we don't enable MSI anymore on gen4, we can always use GMBUS/AUX
* interrupts.
*/
#define HAS_AUX_IRQ(dev_priv) ((dev_priv)->info.gen >= 5)
#define HAS_GMBUS_IRQ(dev_priv) ((dev_priv)->info.has_gmbus_irq)
#define HAS_AUX_IRQ(dev_priv) true
#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
......@@ -3089,6 +3141,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
......@@ -3234,7 +3288,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
unsigned long delay;
if (unlikely(!i915.enable_hangcheck))
if (unlikely(!i915_modparams.enable_hangcheck))
return;
/* Don't continually defer the hangcheck so that it is always run at
......@@ -3267,6 +3321,8 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
return dev_priv->vgpu.active;
}
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
u32 status_mask);
......@@ -4360,4 +4416,12 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 10)
return CNL_HWS_CSB_WRITE_INDEX;
else
return I915_HWS_CSB_WRITE_INDEX;
}
#endif
......@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
* the alignment of the buddy allocation will naturally match.
*/
phys = drm_pci_alloc(obj->base.dev,
obj->base.size,
roundup_pow_of_two(obj->base.size),
roundup_pow_of_two(obj->base.size));
if (!phys)
return ERR_PTR(-ENOMEM);
......@@ -694,10 +694,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
switch (obj->base.write_domain) {
case I915_GEM_DOMAIN_GTT:
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
if (!HAS_LLC(dev_priv)) {
intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
spin_unlock_irq(&dev_priv->uncore.lock);
intel_runtime_pm_put(dev_priv);
}
......@@ -2303,7 +2303,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
unsigned int max_segment = i915_sg_segment_size();
gfp_t noreclaim;
int ret;
......@@ -2314,10 +2314,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
max_segment = swiotlb_max_segment();
if (!max_segment)
max_segment = rounddown(UINT_MAX, PAGE_SIZE);
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return ERR_PTR(-ENOMEM);
......@@ -2819,8 +2815,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* Turning off the engine->irq_tasklet until the reset is over
* prevents the race.
*/
tasklet_kill(&engine->irq_tasklet);
tasklet_disable(&engine->irq_tasklet);
tasklet_kill(&engine->execlists.irq_tasklet);
tasklet_disable(&engine->execlists.irq_tasklet);
if (engine->irq_seqno_barrier)
engine->irq_seqno_barrier(engine);
......@@ -2999,7 +2995,7 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
tasklet_enable(&engine->irq_tasklet);
tasklet_enable(&engine->execlists.irq_tasklet);
kthread_unpark(engine->breadcrumbs.signaler);
}
......@@ -3026,9 +3022,6 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
static void engine_set_wedged(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
unsigned long flags;
/* We need to be sure that no thread is running the old callback as
* we install the nop handler (otherwise we would submit a request
* to hardware that will never complete). In order to prevent this
......@@ -3038,40 +3031,7 @@ static void engine_set_wedged(struct intel_engine_cs *engine)
engine->submit_request = nop_submit_request;
/* Mark all executing requests as skipped */
spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link)
if (!i915_gem_request_completed(request))
dma_fence_set_error(&request->fence, -EIO);
spin_unlock_irqrestore(&engine->timeline->lock, flags);
/*
* Clear the execlists queue up before freeing the requests, as those
* are the ones that keep the context and ringbuffer backing objects
* pinned in place.
*/
if (i915.enable_execlists) {
struct execlist_port *port = engine->execlist_port;
unsigned long flags;
unsigned int n;
spin_lock_irqsave(&engine->timeline->lock, flags);
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
i915_gem_request_put(port_request(&port[n]));
memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
engine->execlist_queue = RB_ROOT;
engine->execlist_first = NULL;
spin_unlock_irqrestore(&engine->timeline->lock, flags);
/* The port is checked prior to scheduling a tasklet, but
* just in case we have suspended the tasklet to do the
* wedging make sure that when it wakes, it decides there
* is no work to do by clearing the irq_posted bit.
*/
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
}
engine->cancel_requests(engine);
/* Mark all pending requests as complete so that any concurrent
* (lockless) lookup doesn't try and wait upon the request as we
......@@ -4778,7 +4738,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
return false;
/* TODO: make semaphores and Execlists play nicely together */
if (i915.enable_execlists)
if (i915_modparams.enable_execlists)
return false;
if (value >= 0)
......@@ -4799,7 +4759,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
if (!i915.enable_execlists) {
if (!i915_modparams.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
} else {
......
......@@ -314,7 +314,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
* present or not in use we still need a small bias as ring wraparound
* at offset 0 sometimes hangs. No idea why.
*/
if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
else
ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
......@@ -407,7 +407,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
if (!i915.enable_guc_submission)
if (!i915_modparams.enable_guc_submission)
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
......@@ -431,7 +431,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
if (!i915_modparams.enable_execlists) {
DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
return -EINVAL;
}
......@@ -483,7 +483,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
}
/* Force the GPU state to be restored on enabling */
if (!i915.enable_execlists) {
if (!i915_modparams.enable_execlists) {
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
......@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
enum intel_engine_id id;
const int num_rings =
/* Use an extended w/a on gen7 if signalling from other rings */
(i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
(i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
......@@ -837,7 +837,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
lockdep_assert_held(&req->i915->drm.struct_mutex);
if (i915.enable_execlists)
if (i915_modparams.enable_execlists)
return 0;
if (!req->ctx->engine[engine->id].state) {
......
......@@ -58,6 +58,7 @@ enum {
#define __EXEC_HAS_RELOC BIT(31)
#define __EXEC_VALIDATED BIT(30)
#define __EXEC_INTERNAL_FLAGS (~0u << 30)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
......@@ -679,7 +680,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *uninitialized_var(obj);
struct drm_i915_gem_object *obj;
unsigned int i;
int err;
......@@ -725,19 +726,17 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err_obj;
}
/* transfer ref to ctx */
vma->open_count++;
list_add(&lut->obj_link, &obj->lut_list);
list_add(&lut->ctx_link, &eb->ctx->handles_list);
lut->ctx = eb->ctx;
lut->handle = handle;
/* transfer ref to ctx */
obj = NULL;
add_vma:
err = eb_add_vma(eb, i, vma);
if (unlikely(err))
goto err_obj;
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
......@@ -766,8 +765,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
return eb_reserve(eb);
err_obj:
if (obj)
i915_gem_object_put(obj);
i915_gem_object_put(obj);
err_vma:
eb->vma[i] = NULL;
return err;
......@@ -1587,7 +1585,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
const unsigned int count = eb->buffer_count;
unsigned int i;
if (unlikely(i915.prefault_disable))
if (unlikely(i915_modparams.prefault_disable))
return 0;
for (i = 0; i < count; i++) {
......@@ -2188,6 +2186,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
int out_fence_fd = -1;
int err;
BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
~__EXEC_OBJECT_UNKNOWN_FLAGS);
......
......@@ -180,7 +180,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
return 0;
}
if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
if (has_full_48bit_ppgtt)
return 3;
......@@ -230,13 +230,13 @@ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
switch (level) {
case I915_CACHE_NONE:
pte |= PPAT_UNCACHED_INDEX;
pte |= PPAT_UNCACHED;
break;
case I915_CACHE_WT:
pte |= PPAT_DISPLAY_ELLC_INDEX;
pte |= PPAT_DISPLAY_ELLC;
break;
default:
pte |= PPAT_CACHED_INDEX;
pte |= PPAT_CACHED;
break;
}
......@@ -249,9 +249,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
pde |= addr;
if (level != I915_CACHE_NONE)
pde |= PPAT_CACHED_PDE_INDEX;
pde |= PPAT_CACHED_PDE;
else
pde |= PPAT_UNCACHED_INDEX;
pde |= PPAT_UNCACHED;
return pde;
}
......@@ -481,10 +481,8 @@ static void fill_page_dma(struct i915_address_space *vm,
const u64 val)
{
u64 * const vaddr = kmap_atomic(p->page);
int i;
for (i = 0; i < 512; i++)
vaddr[i] = val;
memset64(vaddr, val, PAGE_SIZE / sizeof(val));
kunmap_atomic(vaddr);
}
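For reference, memset64(dst, val, count) stores the 64-bit pattern val into count consecutive u64 slots, so with 4 KiB pages PAGE_SIZE / sizeof(val) evaluates to 4096 / 8 = 512 and the single call above writes exactly the same 512 quadwords as the removed loop.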
......@@ -1168,19 +1166,22 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
unsigned int pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
int count = gen8_pte_count(start, length);
if (pt == vm->scratch_pt) {
pt = alloc_pt(vm);
if (IS_ERR(pt))
goto unwind;
gen8_initialize_pt(vm, pt);
if (count < GEN8_PTES)
gen8_initialize_pt(vm, pt);
gen8_ppgtt_set_pde(vm, pd, pt, pde);
pd->used_pdes++;
GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
pt->used_ptes += gen8_pte_count(start, length);
pt->used_ptes += count;
}
return 0;
......@@ -1969,7 +1970,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
/* In the case of execlists, PPGTT is enabled by the context descriptor
* and the PDPs are contained within the context itself. We don't
* need to do anything here. */
if (i915.enable_execlists)
if (i915_modparams.enable_execlists)
return 0;
if (!USES_PPGTT(dev_priv))
......@@ -2816,41 +2817,209 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return 0;
}
static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
static struct intel_ppat_entry *
__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
{
struct intel_ppat_entry *entry = &ppat->entries[index];
GEM_BUG_ON(index >= ppat->max_entries);
GEM_BUG_ON(test_bit(index, ppat->used));
entry->ppat = ppat;
entry->value = value;
kref_init(&entry->ref);
set_bit(index, ppat->used);
set_bit(index, ppat->dirty);
return entry;
}
static void __free_ppat_entry(struct intel_ppat_entry *entry)
{
struct intel_ppat *ppat = entry->ppat;
unsigned int index = entry - ppat->entries;
GEM_BUG_ON(index >= ppat->max_entries);
GEM_BUG_ON(!test_bit(index, ppat->used));
entry->value = ppat->clear_value;
clear_bit(index, ppat->used);
set_bit(index, ppat->dirty);
}
/**
* intel_ppat_get - get a usable PPAT entry
* @i915: i915 device instance
* @value: the PPAT value required by the caller
*
* The function searches for an existing PPAT entry that matches the required
* value. On a perfect match, the existing PPAT entry is used. On a partial
* match, it checks whether a PPAT index is still available: if so, a new PPAT
* index is allocated for the required value and the HW is updated; if not,
* the partially matching entry is used.
*/
const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
struct intel_ppat *ppat = &i915->ppat;
struct intel_ppat_entry *entry;
unsigned int scanned, best_score;
int i;
GEM_BUG_ON(!ppat->max_entries);
scanned = best_score = 0;
for_each_set_bit(i, ppat->used, ppat->max_entries) {
unsigned int score;
score = ppat->match(ppat->entries[i].value, value);
if (score > best_score) {
entry = &ppat->entries[i];
if (score == INTEL_PPAT_PERFECT_MATCH) {
kref_get(&entry->ref);
return entry;
}
best_score = score;
}
scanned++;
}
if (scanned == ppat->max_entries) {
if (!best_score)
return ERR_PTR(-ENOSPC);
kref_get(&entry->ref);
return entry;
}
i = find_first_zero_bit(ppat->used, ppat->max_entries);
entry = __alloc_ppat_entry(ppat, i, value);
ppat->update_hw(i915);
return entry;
}
static void release_ppat(struct kref *kref)
{
struct intel_ppat_entry *entry =
container_of(kref, struct intel_ppat_entry, ref);
struct drm_i915_private *i915 = entry->ppat->i915;
__free_ppat_entry(entry);
entry->ppat->update_hw(i915);
}
/**
* intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
* @entry: an intel PPAT entry
*
* Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
* the entry was dynamically allocated, its reference count is decreased. Once
* the reference count drops to zero, the PPAT index becomes free again.
*/
void intel_ppat_put(const struct intel_ppat_entry *entry)
{
struct intel_ppat *ppat = entry->ppat;
unsigned int index = entry - ppat->entries;
GEM_BUG_ON(!ppat->max_entries);
kref_put(&ppat->entries[index].ref, release_ppat);
}
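A hypothetical caller of this get/put pair (a sketch only; the function name and usage below are illustrative and not part of this series):
static int example_use_ppat(struct drm_i915_private *i915)
{
	const struct intel_ppat_entry *entry;
	unsigned int index;
	/* Ask for an entry matching the desired cacheability value. */
	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
	if (IS_ERR(entry))
		return PTR_ERR(entry);	/* -ENOSPC: no free index and no partial match */
	/* The entry's position in the table is the PPAT index to encode into PTEs. */
	index = entry - entry->ppat->entries;
	(void)index;
	/* Drop the reference; a dynamically allocated index is freed on last put. */
	intel_ppat_put(entry);
	return 0;
}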
static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
{
struct intel_ppat *ppat = &dev_priv->ppat;
int i;
for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
clear_bit(i, ppat->dirty);
}
}
static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
{
struct intel_ppat *ppat = &dev_priv->ppat;
u64 pat = 0;
int i;
for (i = 0; i < ppat->max_entries; i++)
pat |= GEN8_PPAT(i, ppat->entries[i].value);
bitmap_clear(ppat->dirty, 0, ppat->max_entries);
I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
static unsigned int bdw_private_pat_match(u8 src, u8 dst)
{
unsigned int score = 0;
enum {
AGE_MATCH = BIT(0),
TC_MATCH = BIT(1),
CA_MATCH = BIT(2),
};
/* Cache attribute has to be matched. */
if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
return 0;
score |= CA_MATCH;
if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
score |= TC_MATCH;
if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
score |= AGE_MATCH;
if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
return INTEL_PPAT_PERFECT_MATCH;
return score;
}
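/*
 * Worked example of the scoring above (values illustrative): comparing
 * src = GEN8_PPAT_WB | GEN8_PPAT_LLC with dst = GEN8_PPAT_WB |
 * GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3) keeps the cache attribute (WB), so
 * CA_MATCH is set, but the target-cache and age fields differ, so the
 * result is just CA_MATCH rather than INTEL_PPAT_PERFECT_MATCH.
 */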
static unsigned int chv_private_pat_match(u8 src, u8 dst)
{
return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
INTEL_PPAT_PERFECT_MATCH : 0;
}
static void cnl_setup_private_ppat(struct intel_ppat *ppat)
{
ppat->max_entries = 8;
ppat->update_hw = cnl_private_pat_update_hw;
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
/* XXX: spec is unclear if this is still needed for CNL+ */
if (!USES_PPGTT(dev_priv)) {
I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_UC);
if (!USES_PPGTT(ppat->i915)) {
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC);
I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
* bits. When using advanced contexts each context stores its own PAT, but
* writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
static void bdw_setup_private_ppat(struct intel_ppat *ppat)
{
u64 pat;
ppat->max_entries = 8;
ppat->update_hw = bdw_private_pat_update_hw;
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
if (!USES_PPGTT(dev_priv))
if (!USES_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
......@@ -2864,17 +3033,26 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
* So we can still hold onto all our assumptions wrt cpu
* clflushing on LLC machines.
*/
pat = GEN8_PPAT(0, GEN8_PPAT_UC);
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
return;
}
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
* write would work. */
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
static void chv_setup_private_ppat(struct intel_ppat *ppat)
{
u64 pat;
ppat->max_entries = 8;
ppat->update_hw = bdw_private_pat_update_hw;
ppat->match = chv_private_pat_match;
ppat->clear_value = CHV_PPAT_SNOOP;
/*
* Map WB on BDW to snooped on CHV.
......@@ -2894,17 +3072,15 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
* Which means we must set the snoop bit in PAT entry 0
* in order to keep the global status page working.
*/
pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
GEN8_PPAT(1, 0) |
GEN8_PPAT(2, 0) |
GEN8_PPAT(3, 0) |
GEN8_PPAT(4, CHV_PPAT_SNOOP) |
GEN8_PPAT(5, CHV_PPAT_SNOOP) |
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
GEN8_PPAT(7, CHV_PPAT_SNOOP);
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
__alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
__alloc_ppat_entry(ppat, 1, 0);
__alloc_ppat_entry(ppat, 2, 0);
__alloc_ppat_entry(ppat, 3, 0);
__alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
__alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
__alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
__alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
......@@ -2915,6 +3091,31 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
cleanup_scratch_page(vm);
}
static void setup_private_pat(struct drm_i915_private *dev_priv)
{
struct intel_ppat *ppat = &dev_priv->ppat;
int i;
ppat->i915 = dev_priv;
if (INTEL_GEN(dev_priv) >= 10)
cnl_setup_private_ppat(ppat);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(ppat);
else
bdw_setup_private_ppat(ppat);
GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
for_each_clear_bit(i, ppat->used, ppat->max_entries) {
ppat->entries[i].value = ppat->clear_value;
ppat->entries[i].ppat = ppat;
set_bit(i, ppat->dirty);
}
ppat->update_hw(dev_priv);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
struct drm_i915_private *dev_priv = ggtt->base.i915;
......@@ -2947,14 +3148,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
}
ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
if (INTEL_GEN(dev_priv) >= 10)
cnl_setup_private_ppat(dev_priv);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
ggtt->base.cleanup = gen6_gmch_remove;
ggtt->base.bind_vma = ggtt_bind_vma;
ggtt->base.unbind_vma = ggtt_unbind_vma;
......@@ -2975,6 +3168,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->invalidate = gen6_ggtt_invalidate;
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
}
......@@ -3095,7 +3290,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
* currently don't have any bits spare to pass in this upper
* restriction!
*/
if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}
......@@ -3232,13 +3427,10 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
ggtt->base.closed = false;
if (INTEL_GEN(dev_priv) >= 8) {
if (INTEL_GEN(dev_priv) >= 10)
cnl_setup_private_ppat(dev_priv);
else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
chv_setup_private_ppat(dev_priv);
else
bdw_setup_private_ppat(dev_priv);
struct intel_ppat *ppat = &dev_priv->ppat;
bitmap_set(ppat->dirty, 0, ppat->max_entries);
dev_priv->ppat.update_hw(dev_priv);
return;
}
......
......@@ -126,13 +126,13 @@ typedef u64 gen8_ppgtt_pml4e_t;
* tables */
#define GEN8_PDPE_MASK 0x1ff
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */
#define PPAT_UNCACHED (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE 0 /* WB LLC */
#define PPAT_CACHED _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC _PAGE_PCD /* WT eLLC */
#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_AGE(x) ((x)<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
......@@ -143,6 +143,11 @@ typedef u64 gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
#define GEN8_PPAT_GET_CA(x) ((x) & 3)
#define GEN8_PPAT_GET_TC(x) ((x) & (3 << 2))
#define GEN8_PPAT_GET_AGE(x) ((x) & (3 << 4))
#define CHV_PPAT_GET_SNOOP(x) ((x) & (1 << 6))
struct sg_table;
struct intel_rotation_info {
......@@ -536,6 +541,37 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ggtt, base);
}
#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)
struct intel_ppat;
struct intel_ppat_entry {
struct intel_ppat *ppat;
struct kref ref;
u8 value;
};
struct intel_ppat {
struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
unsigned int max_entries;
u8 clear_value;
/*
* Returns a score indicating how well two PPAT values match;
* INTEL_PPAT_PERFECT_MATCH indicates a perfect match.
*/
unsigned int (*match)(u8 src, u8 dst);
void (*update_hw)(struct drm_i915_private *i915);
struct drm_i915_private *i915;
};
const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value);
void intel_ppat_put(const struct intel_ppat_entry *entry);
int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
......
......@@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
return this_cpu != cpu;
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
u32 seqno, int state, unsigned long timeout_us)
static bool __i915_spin_request(const struct drm_i915_gem_request *req,
u32 seqno, int state, unsigned long timeout_us)
{
struct intel_engine_cs *engine = req->engine;
unsigned int irq, cpu;
GEM_BUG_ON(!seqno);
/*
* Only wait for the request if we know it is likely to complete.
*
* We don't track the timestamps around requests, nor the average
* request length, so we do not have a good indicator that this
* request will complete within the timeout. What we do know is the
* order in which requests are executed by the engine and so we can
* tell if the request has started. If the request hasn't started yet,
* it is a fair assumption that it will not complete within our
* relatively short timeout.
*/
if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
return false;
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
......@@ -1040,12 +1056,8 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
irq = atomic_read(&engine->irq_count);
timeout_us += local_clock_us(&cpu);
do {
if (seqno != i915_gem_request_global_seqno(req))
break;
if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
seqno))
return true;
if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
return seqno == i915_gem_request_global_seqno(req);
/* Seqnos are meant to be ordered *before* the interrupt. If
* we see an interrupt without a corresponding seqno advance,
......@@ -1156,7 +1168,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
if (__i915_spin_request(req, wait.seqno, state, 5))
goto complete;
set_current_state(state);
......@@ -1213,7 +1225,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
continue;
/* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2))
if (__i915_spin_request(req, wait.seqno, state, 2))
break;
if (!intel_wait_check_request(&wait, req)) {
......
......@@ -312,26 +312,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
return (s32)(seq1 - seq2) >= 0;
}
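/*
 * The signed-difference trick above stays correct across u32 wraparound,
 * e.g. (illustrative values) seq1 = 3, seq2 = 0xfffffffe:
 * (s32)(3 - 0xfffffffe) == 5 >= 0, so seqno 3 is treated as having passed
 * 0xfffffffe even though it is numerically smaller.
 */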
static inline bool
__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
{
GEM_BUG_ON(!seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
seqno - 1);
}
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
u32 seqno;
seqno = i915_gem_request_global_seqno(req);
if (!seqno)
return false;
return __i915_gem_request_started(req, seqno);
}
static inline bool
__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
......@@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
return __i915_gem_request_completed(req, seqno);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
u32 seqno, int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
u32 seqno;
seqno = i915_gem_request_global_seqno(request);
if (!seqno)
return 0;
return (__i915_gem_request_started(request, seqno) &&
__i915_spin_request(request, seqno, state, timeout_us));
}
/* We treat requests as fences. This is not to be confused with our
* "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
* We use the fences to synchronize access from the CPU with activity on the
......
......@@ -399,64 +399,42 @@ struct get_pages_work {
struct task_struct *task;
};
#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
struct scatterlist *sg;
int ret, n;
*st = kmalloc(sizeof(**st), GFP_KERNEL);
if (*st == NULL)
return -ENOMEM;
if (swiotlb_active()) {
ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
if (ret)
goto err;
for_each_sg((*st)->sgl, sg, num_pages, n)
sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
} else {
ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
0, num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (ret)
goto err;
}
return 0;
err:
kfree(*st);
*st = NULL;
return ret;
}
static struct sg_table *
__i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
__i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
struct page **pvec, int num_pages)
{
struct sg_table *pages;
unsigned int max_segment = i915_sg_segment_size();
struct sg_table *st;
int ret;
ret = st_set_pages(&pages, pvec, num_pages);
if (ret)
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return ERR_PTR(-ENOMEM);
alloc_table:
ret = __sg_alloc_table_from_pages(st, pvec, num_pages,
0, num_pages << PAGE_SHIFT,
max_segment,
GFP_KERNEL);
if (ret) {
kfree(st);
return ERR_PTR(ret);
}
ret = i915_gem_gtt_prepare_pages(obj, pages);
ret = i915_gem_gtt_prepare_pages(obj, st);
if (ret) {
sg_free_table(pages);
kfree(pages);
sg_free_table(st);
if (max_segment > PAGE_SIZE) {
max_segment = PAGE_SIZE;
goto alloc_table;
}
kfree(st);
return ERR_PTR(ret);
}
return pages;
return st;
}
static int
......@@ -540,7 +518,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct sg_table *pages = ERR_PTR(ret);
if (pinned == npages) {
pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
pages = __i915_gem_userptr_alloc_pages(obj, pvec,
npages);
if (!IS_ERR(pages)) {
__i915_gem_object_set_pages(obj, pages);
pinned = 0;
......@@ -661,7 +640,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pages = __i915_gem_userptr_get_pages_schedule(obj);
active = pages == ERR_PTR(-EAGAIN);
} else {
pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
active = !IS_ERR(pages);
}
if (active)
......@@ -834,7 +813,9 @@ int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
hash_init(dev_priv->mm_structs);
dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire", WQ_HIGHPRI, 0);
alloc_workqueue("i915-userptr-acquire",
WQ_HIGHPRI | WQ_MEM_RECLAIM,
0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
......
......@@ -396,6 +396,8 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
static void error_print_engine(struct drm_i915_error_state_buf *m,
const struct drm_i915_error_engine *ee)
{
int n;
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
......@@ -465,8 +467,11 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
jiffies_to_msecs(jiffies - ee->hangcheck_timestamp));
err_printf(m, " engine reset count: %u\n", ee->reset_count);
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
for (n = 0; n < ee->num_ports; n++) {
err_printf(m, " ELSP[%d]:", n);
error_print_request(m, " ", &ee->execlist[n]);
}
error_print_context(m, " Active context: ", &ee->context);
}
......@@ -567,7 +572,7 @@ static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
static void err_print_params(struct drm_i915_error_state_buf *m,
const struct i915_params *p)
{
#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
......@@ -861,7 +866,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
#define FREE(T, x) free_param(#T, &error->params.x);
#define FREE(T, x, ...) free_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
......@@ -1327,17 +1332,19 @@ static void engine_record_requests(struct intel_engine_cs *engine,
static void error_record_engine_execlists(struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
const struct execlist_port *port = engine->execlist_port;
const struct intel_engine_execlists * const execlists = &engine->execlists;
unsigned int n;
for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
struct drm_i915_gem_request *rq = port_request(&port[n]);
for (n = 0; n < execlists_num_ports(execlists); n++) {
struct drm_i915_gem_request *rq = port_request(&execlists->port[n]);
if (!rq)
break;
record_request(rq, &ee->execlist[n]);
}
ee->num_ports = n;
}
static void record_context(struct drm_i915_error_context *e,
......@@ -1554,7 +1561,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))
return;
error->guc_log = i915_error_object_create(dev_priv,
......@@ -1696,8 +1703,8 @@ static int capture(void *data)
ktime_to_timeval(ktime_sub(ktime_get(),
error->i915->gt.last_init_time));
error->params = i915;
#define DUP(T, x) dup_param(#T, &error->params.x);
error->params = i915_modparams;
#define DUP(T, x, ...) dup_param(#T, &error->params.x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
......@@ -1751,7 +1758,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
struct i915_gpu_state *error;
unsigned long flags;
if (!i915.error_capture)
if (!i915_modparams.error_capture)
return;
if (READ_ONCE(dev_priv->gpu_error.first_error))
......
......@@ -192,13 +192,12 @@ static int __create_doorbell(struct i915_guc_client *client)
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = client->doorbell_cookie;
doorbell->cookie = 0;
err = __guc_allocate_doorbell(client->guc, client->stage_id);
if (err) {
if (err)
doorbell->db_status = GUC_DOORBELL_DISABLED;
doorbell->cookie = 0;
}
return err;
}
......@@ -306,7 +305,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->db_base_addr = 0;
desc->stage_id = client->stage_id;
desc->wq_size_bytes = client->wq_size;
desc->wq_size_bytes = GUC_WQ_SIZE;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
......@@ -391,8 +390,8 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
desc->process_desc = gfx_addr + client->proc_desc_offset;
desc->wq_addr = gfx_addr + client->wq_offset;
desc->wq_size = client->wq_size;
desc->wq_addr = gfx_addr + GUC_DB_SIZE;
desc->wq_size = GUC_WQ_SIZE;
desc->desc_private = (uintptr_t)client;
}
......@@ -406,82 +405,23 @@ static void guc_stage_desc_fini(struct intel_guc *guc,
memset(desc, 0, sizeof(*desc));
}
/**
* i915_guc_wq_reserve() - reserve space in the GuC's workqueue
* @request: request associated with the commands
*
* Return: 0 if space is available
* -EAGAIN if space is not currently available
*
* This function must be called (and must return 0) before a request
* is submitted to the GuC via i915_guc_submit() below. Once a result
* of 0 has been returned, it must be balanced by a corresponding
* call to submit().
*
* Reservation allows the caller to determine in advance that space
* will be available for the next submission before committing resources
* to it, and helps avoid late failures with complicated recovery paths.
*/
int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
struct guc_process_desc *desc = __get_process_desc(client);
u32 freespace;
int ret;
spin_lock_irq(&client->wq_lock);
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
freespace -= client->wq_rsvd;
if (likely(freespace >= wqi_size)) {
client->wq_rsvd += wqi_size;
ret = 0;
} else {
client->no_wq_space++;
ret = -EAGAIN;
}
spin_unlock_irq(&client->wq_lock);
return ret;
}
static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
{
unsigned long flags;
spin_lock_irqsave(&client->wq_lock, flags);
client->wq_rsvd += size;
spin_unlock_irqrestore(&client->wq_lock, flags);
}
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
const int wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
guc_client_update_wq_rsvd(client, -wqi_size);
}
/* Construct a Work Item and append it to the GuC's Work Queue */
static void guc_wq_item_append(struct i915_guc_client *client,
struct drm_i915_gem_request *rq)
{
/* wqi_len is in DWords, and does not include the one-word header */
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size/sizeof(u32) - 1;
const u32 wqi_len = wqi_size / sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *ctx = rq->ctx;
struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
u32 ring_tail, wq_off;
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
lockdep_assert_held(&client->wq_lock);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = intel_ring_set_tail(rq->ring, rq->tail) >> 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
/* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
* should not have the case where structure wqi is across page, neither
......@@ -491,29 +431,29 @@ static void guc_wq_item_append(struct i915_guc_client *client,
* workqueue buffer dw by dw.
*/
BUILD_BUG_ON(wqi_size != 16);
GEM_BUG_ON(client->wq_rsvd < wqi_size);
/* postincrement WQ tail for next time */
wq_off = client->wq_tail;
/* Free space is guaranteed. */
wq_off = READ_ONCE(desc->tail);
GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
GUC_WQ_SIZE) < wqi_size);
GEM_BUG_ON(wq_off & (wqi_size - 1));
client->wq_tail += wqi_size;
client->wq_tail &= client->wq_size - 1;
client->wq_rsvd -= wqi_size;
/* WQ starts from the page after doorbell / process_desc */
wqi = client->vaddr + wq_off + GUC_DB_SIZE;
/* Now fill in the 4-word work queue item */
wqi->header = WQ_TYPE_INORDER |
(wqi_len << WQ_LEN_SHIFT) |
(engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
(wqi_len << WQ_LEN_SHIFT) |
(engine->guc_id << WQ_TARGET_SHIFT) |
WQ_NO_WCFLUSH_WAIT;
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
wqi->context_desc = lower_32_bits(intel_lr_context_descriptor(ctx, engine));
wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
/* Postincrement WQ tail for next time. */
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
static void guc_reset_wq(struct i915_guc_client *client)
......@@ -522,106 +462,64 @@ static void guc_reset_wq(struct i915_guc_client *client)
desc->head = 0;
desc->tail = 0;
client->wq_tail = 0;
}
static int guc_ring_doorbell(struct i915_guc_client *client)
static void guc_ring_doorbell(struct i915_guc_client *client)
{
struct guc_process_desc *desc = __get_process_desc(client);
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
/* Update the tail so it is visible to GuC */
desc->tail = client->wq_tail;
/* current cookie */
db_cmp.db_status = GUC_DOORBELL_ENABLED;
db_cmp.cookie = client->doorbell_cookie;
struct guc_doorbell_info *db;
u32 cookie;
/* cookie to be updated */
db_exc.db_status = GUC_DOORBELL_ENABLED;
db_exc.cookie = client->doorbell_cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
lockdep_assert_held(&client->wq_lock);
/* pointer of current doorbell cacheline */
db = (union guc_doorbell_qw *)__get_doorbell(client);
while (attempt--) {
/* lets ring the doorbell */
db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
db_cmp.value_qw, db_exc.value_qw);
/* if the exchange was successfully executed */
if (db_ret.value_qw == db_cmp.value_qw) {
/* db was successfully rung */
client->doorbell_cookie = db_exc.cookie;
ret = 0;
break;
}
/* XXX: doorbell was lost and need to acquire it again */
if (db_ret.db_status == GUC_DOORBELL_DISABLED)
break;
db = __get_doorbell(client);
DRM_WARN("Cookie mismatch. Expected %d, found %d\n",
db_cmp.cookie, db_ret.cookie);
/* update the cookie to newly read cookie from GuC */
db_cmp.cookie = db_ret.cookie;
db_exc.cookie = db_ret.cookie + 1;
if (db_exc.cookie == 0)
db_exc.cookie = 1;
}
/* we're not expecting the doorbell cookie to change behind our back */
cookie = READ_ONCE(db->cookie);
WARN_ON_ONCE(xchg(&db->cookie, cookie + 1) != cookie);
return ret;
/* XXX: doorbell was lost and need to acquire it again */
GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
}
/**
* __i915_guc_submit() - Submit commands through GuC
* @rq: request associated with the commands
*
* The caller must have already called i915_guc_wq_reserve() above with
* a result of 0 (success), guaranteeing that there is space in the work
* queue for the new request, so enqueuing the item cannot fail.
*
* Bad Things Will Happen if the caller violates this protocol e.g. calls
* submit() when _reserve() says there's no space, or calls _submit()
* a different number of times from (successful) calls to _reserve().
* i915_guc_submit() - Submit commands through GuC
* @engine: engine associated with the commands
*
* The only error here arises if the doorbell hardware isn't functioning
* as expected, which really shouldn't happen.
*/
static void __i915_guc_submit(struct drm_i915_gem_request *rq)
static void i915_guc_submit(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = rq->i915;
struct intel_engine_cs *engine = rq->engine;
unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct drm_i915_private *dev_priv = engine->i915;
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
unsigned long flags;
int b_ret;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
const unsigned int engine_id = engine->id;
unsigned int n;
/* WA to flush out the pending GMADR writes to ring buffer. */
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
for (n = 0; n < ARRAY_SIZE(execlists->port); n++) {
struct drm_i915_gem_request *rq;
unsigned int count;
spin_lock_irqsave(&client->wq_lock, flags);
rq = port_unpack(&port[n], &count);
if (rq && count == 0) {
port_set(&port[n], port_pack(rq, ++count));
guc_wq_item_append(client, rq);
b_ret = guc_ring_doorbell(client);
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
client->submissions[engine_id] += 1;
spin_lock(&client->wq_lock);
spin_unlock_irqrestore(&client->wq_lock, flags);
}
guc_wq_item_append(client, rq);
guc_ring_doorbell(client);
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
__i915_gem_request_submit(rq);
__i915_guc_submit(rq);
client->submissions[engine_id] += 1;
spin_unlock(&client->wq_lock);
}
}
}
static void nested_enable_signaling(struct drm_i915_gem_request *rq)
......@@ -655,27 +553,33 @@ static void port_assign(struct execlist_port *port,
if (port_isset(port))
i915_gem_request_put(port_request(port));
port_set(port, i915_gem_request_get(rq));
port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
nested_enable_signaling(rq);
}
static bool i915_guc_dequeue(struct intel_engine_cs *engine)
static void i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlist_port;
struct drm_i915_gem_request *last = port_request(port);
struct rb_node *rb;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
struct drm_i915_gem_request *last = NULL;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
bool submit = false;
struct rb_node *rb;
if (port_isset(port))
port++;
spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
GEM_BUG_ON(rb_first(&engine->execlist_queue) != rb);
rb = execlists->first;
GEM_BUG_ON(rb_first(&execlists->queue) != rb);
while (rb) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct drm_i915_gem_request *rq, *rn;
list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
if (last && rq->ctx != last->ctx) {
if (port != engine->execlist_port) {
if (port == last_port) {
__list_del_many(&p->requests,
&rq->priotree.link);
goto done;
......@@ -689,50 +593,48 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
INIT_LIST_HEAD(&rq->priotree.link);
rq->priotree.priority = INT_MAX;
i915_guc_submit(rq);
trace_i915_gem_request_in(rq, port_index(port, engine));
__i915_gem_request_submit(rq);
trace_i915_gem_request_in(rq, port_index(port, execlists));
last = rq;
submit = true;
}
rb = rb_next(rb);
rb_erase(&p->node, &engine->execlist_queue);
rb_erase(&p->node, &execlists->queue);
INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
done:
engine->execlist_first = rb;
if (submit)
execlists->first = rb;
if (submit) {
port_assign(port, last);
i915_guc_submit(engine);
}
spin_unlock_irq(&engine->timeline->lock);
return submit;
}
static void i915_guc_irq_handler(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct execlist_port *port = engine->execlist_port;
struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
const struct execlist_port * const last_port =
&execlists->port[execlists->port_mask];
struct drm_i915_gem_request *rq;
bool submit;
do {
rq = port_request(&port[0]);
while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
rq = port_request(&port[0]);
while (rq && i915_gem_request_completed(rq)) {
trace_i915_gem_request_out(rq);
i915_gem_request_put(rq);
port[0] = port[1];
memset(&port[1], 0, sizeof(port[1]));
execlists_port_complete(execlists, port);
rq = port_request(&port[0]);
}
rq = port_request(&port[0]);
}
submit = false;
if (!port_count(&port[1]))
submit = i915_guc_dequeue(engine);
} while (submit);
if (!port_isset(last_port))
i915_guc_dequeue(engine);
}
/*
......@@ -913,8 +815,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->engines = engines;
client->priority = priority;
client->doorbell_id = GUC_DOORBELL_INVALID;
client->wq_offset = GUC_DB_SIZE;
client->wq_size = GUC_WQ_SIZE;
spin_lock_init(&client->wq_lock);
ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
......@@ -996,28 +896,39 @@ static void guc_client_free(struct i915_guc_client *client)
kfree(client);
}
static void guc_policy_init(struct guc_policy *policy)
{
policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
policy->policy_flags = 0;
}
static void guc_policies_init(struct guc_policies *policies)
{
struct guc_policy *policy;
u32 p, i;
policies->dpc_promote_time = 500000;
policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
policy->execution_quantum = 1000000;
policy->preemption_time = 500000;
policy->fault_time = 250000;
policy->policy_flags = 0;
guc_policy_init(policy);
}
}
policies->is_valid = 1;
}
/*
* The first 80 dwords of the register state context, containing the
* execlists and ppgtt registers.
*/
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
......@@ -1032,6 +943,8 @@ static int guc_ads_create(struct intel_guc *guc)
} __packed *blob;
struct intel_engine_cs *engine;
enum intel_engine_id id;
const u32 skipped_offset = LRC_HEADER_PAGES * PAGE_SIZE;
const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
u32 base;
GEM_BUG_ON(guc->ads_vma);
......@@ -1062,13 +975,20 @@ static int guc_ads_create(struct intel_guc *guc)
* engines after a reset. Here we use the Render ring default
* context, which must already exist and be pinned in the GGTT,
* so its address won't change after we've told the GuC where
* to find it.
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
blob->ads.golden_context_lrca =
dev_priv->engine[RCS]->status_page.ggtt_offset;
guc_ggtt_offset(dev_priv->kernel_context->engine[RCS].state) + skipped_offset;
/*
* The GuC expects us to exclude the portion of the context image that
* it skips from the size it is to read. It starts reading from after
* the execlist context (so skipping the first page [PPHWSP] and 80
* dwords). Weird guc is weird.
*/
for_each_engine(engine, dev_priv, id)
blob->ads.eng_state_size[engine->guc_id] = engine->context_size;
blob->ads.eng_state_size[engine->guc_id] = engine->context_size - skipped_size;
base = guc_ggtt_offset(vma);
blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
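For a sense of the sizes involved (assuming 4 KiB pages and a single PPHWSP page, which this hunk does not spell out): LR_HW_CONTEXT_SIZE is 80 * 4 = 320 bytes, so skipped_size = 1 * 4096 + 320 = 4416 bytes, and each eng_state_size value reported to the GuC is the engine's context_size less roughly 4.3 KiB.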
......@@ -1221,6 +1141,19 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
enum intel_engine_id id;
int err;
/*
* We're using GuC work items for submitting work through GuC. Since
* we're coalescing multiple requests from a single context into a
* single work item prior to assigning it to execlist_port, we can
* never have more work items than the total number of ports (for all
* engines). The GuC firmware controls the HEAD of the work queue,
* and it is guaranteed that it will remove the work item from the
* queue before our request is completed.
*/
BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
sizeof(struct guc_wq_item) *
I915_NUM_ENGINES > GUC_WQ_SIZE);
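	/*
	 * Worked example (numbers illustrative, not taken from this diff):
	 * with 2 execlist ports per engine, 16-byte work items and 5 engines,
	 * at most 2 * 16 * 5 = 160 bytes of work items can be outstanding,
	 * far below a two-page GUC_WQ_SIZE (2 * 4096 = 8192 bytes), so the
	 * BUILD_BUG_ON above cannot fire.
	 */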
if (!client) {
client = guc_client_alloc(dev_priv,
INTEL_INFO(dev_priv)->ring_mask,
......@@ -1248,24 +1181,15 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_interrupts_capture(dev_priv);
for_each_engine(engine, dev_priv, id) {
const int wqi_size = sizeof(struct guc_wq_item);
struct drm_i915_gem_request *rq;
struct intel_engine_execlists * const execlists = &engine->execlists;
/* The tasklet was initialised by execlists, and may be in
* a state of flux (across a reset) and so we just want to
* take over the callback without changing any other state
* in the tasklet.
*/
engine->irq_tasklet.func = i915_guc_irq_handler;
execlists->irq_tasklet.func = i915_guc_irq_handler;
clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
/* Replay the current set of previously submitted requests */
spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link) {
guc_client_update_wq_rsvd(client, wqi_size);
__i915_guc_submit(rq);
}
spin_unlock_irq(&engine->timeline->lock);
tasklet_schedule(&execlists->irq_tasklet);
}
return 0;
......@@ -1310,7 +1234,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
/* any value greater than GUC_POWER_D0 */
data[1] = GUC_POWER_D1;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
......@@ -1328,7 +1252,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
if (i915.guc_log_level >= 0)
if (i915_modparams.guc_log_level >= 0)
gen9_enable_guc_interrupts(dev_priv);
ctx = dev_priv->kernel_context;
......@@ -1336,7 +1260,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
data[0] = INTEL_GUC_ACTION_EXIT_S_STATE;
data[1] = GUC_POWER_D0;
/* first page is shared data with GuC */
data[2] = guc_ggtt_offset(ctx->engine[RCS].state);
data[2] = guc_ggtt_offset(ctx->engine[RCS].state) + LRC_GUCSHR_PN * PAGE_SIZE;
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
This diff has been collapsed.
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#include <linux/sysfs.h>
#include "i915_drv.h"
#include "i915_oa_cflgt2.h"
static const struct i915_oa_reg b_counter_config_test_oa[] = {
{ _MMIO(0x2740), 0x00000000 },
{ _MMIO(0x2744), 0x00800000 },
{ _MMIO(0x2714), 0xf0800000 },
{ _MMIO(0x2710), 0x00000000 },
{ _MMIO(0x2724), 0xf0800000 },
{ _MMIO(0x2720), 0x00000000 },
{ _MMIO(0x2770), 0x00000004 },
{ _MMIO(0x2774), 0x00000000 },
{ _MMIO(0x2778), 0x00000003 },
{ _MMIO(0x277c), 0x00000000 },
{ _MMIO(0x2780), 0x00000007 },
{ _MMIO(0x2784), 0x00000000 },
{ _MMIO(0x2788), 0x00100002 },
{ _MMIO(0x278c), 0x0000fff7 },
{ _MMIO(0x2790), 0x00100002 },
{ _MMIO(0x2794), 0x0000ffcf },
{ _MMIO(0x2798), 0x00100082 },
{ _MMIO(0x279c), 0x0000ffef },
{ _MMIO(0x27a0), 0x001000c2 },
{ _MMIO(0x27a4), 0x0000ffe7 },
{ _MMIO(0x27a8), 0x00100001 },
{ _MMIO(0x27ac), 0x0000ffe7 },
};
static const struct i915_oa_reg flex_eu_config_test_oa[] = {
};
static const struct i915_oa_reg mux_config_test_oa[] = {
{ _MMIO(0x9840), 0x00000080 },
{ _MMIO(0x9888), 0x11810000 },
{ _MMIO(0x9888), 0x07810013 },
{ _MMIO(0x9888), 0x1f810000 },
{ _MMIO(0x9888), 0x1d810000 },
{ _MMIO(0x9888), 0x1b930040 },
{ _MMIO(0x9888), 0x07e54000 },
{ _MMIO(0x9888), 0x1f908000 },
{ _MMIO(0x9888), 0x11900000 },
{ _MMIO(0x9888), 0x37900000 },
{ _MMIO(0x9888), 0x53900000 },
{ _MMIO(0x9888), 0x45900000 },
{ _MMIO(0x9888), 0x33900000 },
};
static ssize_t
show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "1\n");
}
void
i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv)
{
strncpy(dev_priv->perf.oa.test_config.uuid,
"74fb4902-d3d3-4237-9e90-cbdc68d0a446",
UUID_STRING_LEN);
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
dev_priv->perf.oa.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa);
dev_priv->perf.oa.test_config.b_counter_regs = b_counter_config_test_oa;
dev_priv->perf.oa.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa);
dev_priv->perf.oa.test_config.flex_regs = flex_eu_config_test_oa;
dev_priv->perf.oa.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa);
dev_priv->perf.oa.test_config.sysfs_metric.name = "74fb4902-d3d3-4237-9e90-cbdc68d0a446";
dev_priv->perf.oa.test_config.sysfs_metric.attrs = dev_priv->perf.oa.test_config.attrs;
dev_priv->perf.oa.test_config.attrs[0] = &dev_priv->perf.oa.test_config.sysfs_metric_id.attr;
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.name = "id";
dev_priv->perf.oa.test_config.sysfs_metric_id.attr.mode = 0444;
dev_priv->perf.oa.test_config.sysfs_metric_id.show = show_test_oa_id;
}
/*
* Autogenerated file by GPU Top : https://github.com/rib/gputop
* DO NOT EDIT manually!
*
*
* Copyright (c) 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef __I915_OA_CFLGT2_H__
#define __I915_OA_CFLGT2_H__
extern void i915_perf_load_test_config_cflgt2(struct drm_i915_private *dev_priv);
#endif
......@@ -25,235 +25,171 @@
#include "i915_params.h"
#include "i915_drv.h"
struct i915_params i915 __read_mostly = {
.modeset = -1,
.panel_ignore_lid = 1,
.semaphores = -1,
.lvds_channel_mode = 0,
.panel_use_ssc = -1,
.vbt_sdvo_panel_type = -1,
.enable_rc6 = -1,
.enable_dc = -1,
.enable_fbc = -1,
.enable_execlists = -1,
.enable_hangcheck = true,
.enable_ppgtt = -1,
.enable_psr = -1,
.alpha_support = IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT),
.disable_power_well = -1,
.enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0,
.load_detect_test = 0,
.force_reset_modeset_test = 0,
.reset = 2,
.error_capture = true,
.invert_brightness = 0,
.disable_display = 0,
.enable_cmd_parser = true,
.use_mmio_flip = 0,
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
.edp_vswing = 0,
.enable_guc_loading = 0,
.enable_guc_submission = 0,
.guc_log_level = -1,
.guc_firmware_path = NULL,
.huc_firmware_path = NULL,
.enable_dp_mst = true,
.inject_load_failure = 0,
.enable_dpcd_backlight = false,
.enable_gvt = false,
#define i915_param_named(name, T, perm, desc) \
module_param_named(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
#define i915_param_named_unsafe(name, T, perm, desc) \
module_param_named_unsafe(name, i915_modparams.name, T, perm); \
MODULE_PARM_DESC(name, desc)
struct i915_params i915_modparams __read_mostly = {
#define MEMBER(T, member, value) .member = (value),
I915_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};
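/*
 * Illustration of the X-macro pattern (simplified; not the actual contents
 * of i915_params.h): given entries such as
 *
 *   #define I915_PARAMS_FOR_EACH(param) \
 *           param(int, modeset, -1) \
 *           param(bool, enable_hangcheck, true)
 *
 * the MEMBER() pass above expands to ".modeset = (-1), .enable_hangcheck =
 * (true)," so the per-parameter defaults come from a single table rather
 * than the open-coded initialiser this hunk removes.
 */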
module_param_named(modeset, i915.modeset, int, 0400);
MODULE_PARM_DESC(modeset,
i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
i915_param_named_unsafe(panel_ignore_lid, int, 0600,
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
module_param_named_unsafe(semaphores, i915.semaphores, int, 0400);
MODULE_PARM_DESC(semaphores,
i915_param_named_unsafe(semaphores, int, 0400,
"Use semaphores for inter-ring sync "
"(default: -1 (use per-chip defaults))");
module_param_named_unsafe(enable_rc6, i915.enable_rc6, int, 0400);
MODULE_PARM_DESC(enable_rc6,
i915_param_named_unsafe(enable_rc6, int, 0400,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
module_param_named_unsafe(enable_dc, i915.enable_dc, int, 0400);
MODULE_PARM_DESC(enable_dc,
i915_param_named_unsafe(enable_dc, int, 0400,
"Enable power-saving display C-states. "
"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");
module_param_named_unsafe(enable_fbc, i915.enable_fbc, int, 0600);
MODULE_PARM_DESC(enable_fbc,
i915_param_named_unsafe(enable_fbc, int, 0600,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0400);
MODULE_PARM_DESC(lvds_channel_mode,
i915_param_named_unsafe(lvds_channel_mode, int, 0400,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
i915_param_named_unsafe(panel_use_ssc, int, 0600,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0400);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
module_param_named_unsafe(reset, i915.reset, int, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
i915_param_named_unsafe(reset, int, 0600,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
module_param_named_unsafe(vbt_firmware, i915.vbt_firmware, charp, 0400);
MODULE_PARM_DESC(vbt_firmware,
"Load VBT from specified file under /lib/firmware");
i915_param_named_unsafe(vbt_firmware, charp, 0400,
"Load VBT from specified file under /lib/firmware");
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
module_param_named(error_capture, i915.error_capture, bool, 0600);
MODULE_PARM_DESC(error_capture,
i915_param_named(error_capture, bool, 0600,
"Record the GPU state following a hang. "
"This information in /sys/class/drm/card<N>/error is vital for "
"triaging and debugging hangs.");
#endif
module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
module_param_named_unsafe(enable_ppgtt, i915.enable_ppgtt, int, 0400);
MODULE_PARM_DESC(enable_ppgtt,
i915_param_named_unsafe(enable_ppgtt, int, 0400,
"Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists,
i915_param_named_unsafe(enable_execlists, int, 0400,
"Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400);
MODULE_PARM_DESC(alpha_support,
i915_param_named_unsafe(alpha_support, bool, 0400,
"Enable alpha quality driver support for latest hardware. "
"See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0400);
MODULE_PARM_DESC(disable_power_well,
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
i915_param_named_unsafe(enable_ips, int, 0600, "Enable IPS (default: true)");
module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
i915_param_named(fastboot, bool, 0600,
"Try to skip unnecessary mode sets at boot time (default: false)");
module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable,
i915_param_named_unsafe(prefault_disable, bool, 0600,
"Disable page prefaulting for pread/pwrite/reloc (default:false). "
"For developers only.");
module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600);
MODULE_PARM_DESC(load_detect_test,
i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
module_param_named_unsafe(force_reset_modeset_test, i915.force_reset_modeset_test, bool, 0600);
MODULE_PARM_DESC(force_reset_modeset_test,
i915_param_named_unsafe(force_reset_modeset_test, bool, 0600,
"Force a modeset during gpu reset for testing (default:false). "
"For developers only.");
module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness,
i915_param_named_unsafe(invert_brightness, int, 0600,
"Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(disable_display, i915.disable_display, bool, 0400);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
i915_param_named(disable_display, bool, 0400,
"Disable display (default: false)");
module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, bool, 0400);
MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (true=enabled [default], false=disabled)");
i915_param_named_unsafe(enable_cmd_parser, bool, 0400,
"Enable command parsing (true=enabled [default], false=disabled)");
module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
i915_param_named_unsafe(use_mmio_flip, int, 0600,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");
module_param_named(mmio_debug, i915.mmio_debug, int, 0600);
MODULE_PARM_DESC(mmio_debug,
i915_param_named(mmio_debug, int, 0600,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
i915_param_named(verbose_state_checks, bool, 0600,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400);
MODULE_PARM_DESC(nuclear_pageflip,
"Force enable atomic functionality on platforms that don't have full support yet.");
i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
"Force enable atomic functionality on platforms that don't have full support yet.");
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
MODULE_PARM_DESC(edp_vswing,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
MODULE_PARM_DESC(enable_guc_loading,
"Enable GuC firmware loading "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
MODULE_PARM_DESC(enable_guc_submission,
"Enable GuC submission "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
i915_param_named_unsafe(edp_vswing, int, 0400,
"Ignore/Override vswing pre-emph table selection from VBT "
"(0=use value from vbt [default], 1=low power swing(200mV),"
"2=default swing(400mV))");
i915_param_named_unsafe(enable_guc_loading, int, 0400,
"Enable GuC firmware loading "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
i915_param_named_unsafe(enable_guc_submission, int, 0400,
"Enable GuC submission "
"(-1=auto, 0=never [default], 1=if available, 2=required)");
i915_param_named(guc_log_level, int, 0400,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
i915_param_named_unsafe(guc_firmware_path, charp, 0400,
"GuC firmware path to use instead of the default one");
module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
MODULE_PARM_DESC(huc_firmware_path,
i915_param_named_unsafe(huc_firmware_path, charp, 0400,
"HuC firmware path to use instead of the default one");
module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
MODULE_PARM_DESC(enable_dp_mst,
i915_param_named_unsafe(enable_dp_mst, bool, 0600,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
MODULE_PARM_DESC(inject_load_failure,
i915_param_named_unsafe(inject_load_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
MODULE_PARM_DESC(enable_dpcd_backlight,
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
MODULE_PARM_DESC(enable_gvt,
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
(The diffs for the remaining files in this commit are collapsed and not shown here.)