Commit 3950bf3d authored by Oscar Mateo, committed by Joonas Lahtinen

drm/i915/guc: Add onion teardown to the GuC setup

Starting with intel_guc_loader, down to intel_guc_submission
and finally to intel_guc_log.
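
A minimal, self-contained sketch of the "onion" pattern being applied here (the names below are hypothetical, not the actual i915 symbols): each setup step gets its own error label, a failure unwinds the already-completed steps in reverse order, and the fini path peels the same layers back symmetrically.

/* Illustrative only -- hypothetical names, not the i915 code. */
#include <stdlib.h>

struct onion_state {
	void *log;	/* stands in for the GuC log allocation */
	void *ads;	/* stands in for the additional data struct */
	void *client;	/* stands in for the execbuf client */
};

static int onion_init(struct onion_state *s)
{
	s->log = malloc(64);
	if (!s->log)
		goto err;

	s->ads = malloc(64);
	if (!s->ads)
		goto err_log;

	s->client = malloc(64);
	if (!s->client)
		goto err_ads;

	return 0;

err_ads:
	free(s->ads);	/* undo the second layer */
err_log:
	free(s->log);	/* undo the first layer */
err:
	return -1;
}

static void onion_fini(struct onion_state *s)
{
	/* Teardown releases the layers in reverse creation order. */
	free(s->client);
	free(s->ads);
	free(s->log);
}

int main(void)
{
	struct onion_state s = { 0 };

	if (onion_init(&s) == 0)
		onion_fini(&s);
	return 0;
}
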

v2:
  - Null execbuf client outside guc_client_free (Daniele)
  - Assert if things try to get allocated twice (Daniele/Joonas)
  - Null guc->log.buf_addr when destroyed (Daniele)
  - Newline between returning success and error labels (Joonas)
  - Remove some unnecessary comments (Joonas)
  - Keep guc_log_create_extras naming convention (Joonas)
  - Helper function guc_log_has_extras (Joonas)
  - No need for separate relay_channel create/destroy. It's just another extra.
  - No need to nullify guc->log.flush_wq when destroyed (Joonas)
  - Hoist the check for has_extras out of guc_log_create_extras (Joonas)
  - Try to make the i915_guc_log_register/unregister calls (kind of) symmetric (Daniele)
  - Make sure intel_guc_fini is not called before init is ever called (Daniele)

v3:
  - Remove unnecessary parenthesis (Joonas)
  - Check for logs enabled on debugfs registration
  - Rebase on top of Tvrtko's "Fix request re-submission after reset"

v4:
  - Rebased
  - Comment around enabling/disabling interrupts inside GuC logging (Joonas)

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Parent 73b05534
@@ -549,6 +549,8 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
 static void i915_gem_fini(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->drm.struct_mutex);
+	if (i915.enable_guc_loading)
+		intel_uc_fini_hw(dev_priv);
 	i915_gem_cleanup_engines(dev_priv);
 	i915_gem_context_fini(dev_priv);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -609,7 +611,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = i915_gem_init(dev_priv);
 	if (ret)
-		goto cleanup_irq;
+		goto cleanup_uc;
 
 	intel_modeset_gem_init(dev);
@@ -631,9 +633,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	if (i915_gem_suspend(dev_priv))
 		DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 	i915_gem_fini(dev_priv);
+cleanup_uc:
+	intel_uc_fini_fw(dev_priv);
 cleanup_irq:
-	intel_guc_fini(dev_priv);
-	intel_huc_fini(dev_priv);
 	drm_irq_uninstall(dev);
 	intel_teardown_gmbus(dev_priv);
 cleanup_csr:
@@ -1369,9 +1371,8 @@ void i915_driver_unload(struct drm_device *dev)
 	/* Flush any outstanding unpin_work. */
 	drain_workqueue(dev_priv->wq);
 
-	intel_guc_fini(dev_priv);
-	intel_huc_fini(dev_priv);
 	i915_gem_fini(dev_priv);
+	intel_uc_fini_fw(dev_priv);
 	intel_fbc_cleanup_cfb(dev_priv);
 
 	intel_power_domains_fini(dev_priv);
...
@@ -4608,10 +4608,12 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	intel_mocs_init_l3cc_table(dev_priv);
 
-	/* We can't enable contexts until all firmware is loaded */
-	ret = intel_uc_init_hw(dev_priv);
-	if (ret)
-		goto out;
+	if (i915.enable_guc_loading) {
+		/* We can't enable contexts until all firmware is loaded */
+		ret = intel_uc_init_hw(dev_priv);
+		if (ret)
+			goto out;
+	}
 
 out:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
...
@@ -911,7 +911,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
 	ida_simple_remove(&guc->ctx_ids, client->ctx_index);
 err_client:
 	kfree(client);
-
 	return ERR_PTR(ret);
 }
@@ -937,7 +936,7 @@ static void guc_policies_init(struct guc_policies *policies)
 	policies->is_valid = 1;
 }
 
-static void guc_addon_create(struct intel_guc *guc)
+static int guc_addon_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct i915_vma *vma;
@@ -953,14 +952,13 @@ static void guc_addon_create(struct intel_guc *guc)
 	enum intel_engine_id id;
 	u32 base;
 
-	vma = guc->ads_vma;
-	if (!vma) {
-		vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
-		if (IS_ERR(vma))
-			return;
-
-		guc->ads_vma = vma;
-	}
+	GEM_BUG_ON(guc->ads_vma);
+
+	vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	guc->ads_vma = vma;
 
 	page = i915_vma_first_page(vma);
 	blob = kmap(page);
@@ -997,6 +995,13 @@ static void guc_addon_create(struct intel_guc *guc)
 	blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
 
 	kunmap(page);
+
+	return 0;
+}
+
+static void guc_addon_destroy(struct intel_guc *guc)
+{
+	i915_vma_unpin_and_release(&guc->ads_vma);
 }
 
 /*
@@ -1011,6 +1016,7 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_vma *vma;
 	void *vaddr;
+	int ret;
 
 	if (!HAS_GUC_SCHED(dev_priv))
 		return 0;
@@ -1020,10 +1026,10 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	i915_guc_submission_disable(dev_priv);
 
 	if (!i915.enable_guc_submission)
-		return 0; /* not enabled */
+		return 0;
 
 	if (guc->ctx_pool)
-		return 0; /* already allocated */
+		return 0;
 
 	vma = intel_guc_allocate_vma(guc, gemsize);
 	if (IS_ERR(vma))
@@ -1031,15 +1037,23 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 	guc->ctx_pool = vma;
 
-	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
-	if (IS_ERR(vaddr))
-		goto err;
+	vaddr = i915_gem_object_pin_map(guc->ctx_pool->obj, I915_MAP_WB);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto err_vma;
+	}
 
 	guc->ctx_pool_vaddr = vaddr;
 
+	ret = intel_guc_log_create(guc);
+	if (ret < 0)
+		goto err_vaddr;
+
+	ret = guc_addon_create(guc);
+	if (ret < 0)
+		goto err_log;
+
 	ida_init(&guc->ctx_ids);
-	intel_guc_log_create(guc);
-	guc_addon_create(guc);
 
 	guc->execbuf_client = guc_client_alloc(dev_priv,
 					       INTEL_INFO(dev_priv)->ring_mask,
@@ -1047,14 +1061,37 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
 					       dev_priv->kernel_context);
 	if (IS_ERR(guc->execbuf_client)) {
 		DRM_ERROR("Failed to create GuC client for execbuf!\n");
-		goto err;
+		ret = PTR_ERR(guc->execbuf_client);
+		goto err_ads;
 	}
 
 	return 0;
 
-err:
-	i915_guc_submission_fini(dev_priv);
-	return -ENOMEM;
+err_ads:
+	guc_addon_destroy(guc);
+err_log:
+	intel_guc_log_destroy(guc);
+err_vaddr:
+	i915_gem_object_unpin_map(guc->ctx_pool->obj);
+err_vma:
+	i915_vma_unpin_and_release(&guc->ctx_pool);
+	return ret;
+}
+
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
+{
+	struct intel_guc *guc = &dev_priv->guc;
+
+	if (!i915.enable_guc_submission)
+		return;
+
+	guc_client_free(guc->execbuf_client);
+	guc->execbuf_client = NULL;
+
+	ida_destroy(&guc->ctx_ids);
+	guc_addon_destroy(guc);
+	intel_guc_log_destroy(guc);
+	i915_gem_object_unpin_map(guc->ctx_pool->obj);
+	i915_vma_unpin_and_release(&guc->ctx_pool);
 }
 
 static void guc_reset_wq(struct i915_guc_client *client)
@@ -1199,26 +1236,6 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 	intel_engines_reset_default_submission(dev_priv);
 }
 
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
-{
-	struct intel_guc *guc = &dev_priv->guc;
-	struct i915_guc_client *client;
-
-	client = fetch_and_zero(&guc->execbuf_client);
-	if (client && !IS_ERR(client))
-		guc_client_free(client);
-
-	i915_vma_unpin_and_release(&guc->ads_vma);
-	i915_vma_unpin_and_release(&guc->log.vma);
-
-	if (guc->ctx_pool_vaddr) {
-		ida_destroy(&guc->ctx_ids);
-		i915_gem_object_unpin_map(guc->ctx_pool->obj);
-	}
-
-	i915_vma_unpin_and_release(&guc->ctx_pool);
-}
-
 /**
  * intel_guc_suspend() - notify GuC entering suspend state
  * @dev_priv: i915 device private
...
@@ -430,24 +430,3 @@ int intel_guc_select_fw(struct intel_guc *guc)
 
 	return 0;
 }
-
-/**
- * intel_guc_fini() - clean up all allocated resources
- * @dev_priv: i915 device private
- */
-void intel_guc_fini(struct drm_i915_private *dev_priv)
-{
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
-	struct drm_i915_gem_object *obj;
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	i915_guc_submission_disable(dev_priv);
-	i915_guc_submission_fini(dev_priv);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	obj = fetch_and_zero(&guc_fw->obj);
-	if (obj)
-		i915_gem_object_put(obj);
-
-	guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
@@ -66,7 +66,6 @@ static int guc_log_control(struct intel_guc *guc, u32 control_val)
 	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-
 /*
  * Sub buffer switch callback. Called whenever relay has to switch to a new
  * sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -139,45 +138,15 @@ static struct rchan_callbacks relay_callbacks = {
 	.remove_buf_file = remove_buf_file_callback,
 };
 
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
-	relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct rchan *guc_log_relay_chan;
-	size_t n_subbufs, subbuf_size;
-
-	/* Keep the size of sub buffers same as shared log buffer */
-	subbuf_size = guc->log.vma->obj->base.size;
-
-	/* Store up to 8 snapshots, which is large enough to buffer sufficient
-	 * boot time logs and provides enough leeway to User, in terms of
-	 * latency, for consuming the logs from relay. Also doesn't take
-	 * up too much memory.
-	 */
-	n_subbufs = 8;
-
-	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
-					n_subbufs, &relay_callbacks, dev_priv);
-	if (!guc_log_relay_chan) {
-		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
-		return -ENOMEM;
-	}
-
-	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
-	guc->log.relay_chan = guc_log_relay_chan;
-	return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
+static int guc_log_relay_file_create(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct dentry *log_dir;
 	int ret;
 
+	if (i915.guc_log_level < 0)
+		return 0;
+
 	/* For now create the log file in /sys/kernel/debug/dri/0 dir */
 	log_dir = dev_priv->drm.primary->debugfs_root;
@@ -198,7 +167,7 @@ static int guc_log_create_relay_file(struct intel_guc *guc)
 	}
 
 	ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
-	if (ret) {
+	if (ret < 0 && ret != -EEXIST) {
 		DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
 		return ret;
 	}
@@ -371,31 +340,6 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
 	}
 }
 
-static void guc_log_cleanup(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	/* First disable the flush interrupt */
-	gen9_disable_guc_interrupts(dev_priv);
-
-	if (guc->log.flush_wq)
-		destroy_workqueue(guc->log.flush_wq);
-
-	guc->log.flush_wq = NULL;
-
-	if (guc->log.relay_chan)
-		guc_log_remove_relay_file(guc);
-
-	guc->log.relay_chan = NULL;
-
-	if (guc->log.buf_addr)
-		i915_gem_object_unpin_map(guc->log.vma->obj);
-
-	guc->log.buf_addr = NULL;
-}
-
 static void capture_logs_work(struct work_struct *work)
 {
 	struct intel_guc *guc =
@@ -404,120 +348,105 @@ static void capture_logs_work(struct work_struct *work)
 	guc_log_capture_logs(guc);
 }
 
+static bool guc_log_has_extras(struct intel_guc *guc)
+{
+	return guc->log.buf_addr != NULL;
+}
+
 static int guc_log_create_extras(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	void *vaddr;
-	int ret;
+	struct rchan *guc_log_relay_chan;
+	size_t n_subbufs, subbuf_size;
+	int ret = 0;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	/* Nothing to do */
-	if (i915.guc_log_level < 0)
-		return 0;
-
-	if (!guc->log.buf_addr) {
-		/* Create a WC (Uncached for read) vmalloc mapping of log
-		 * buffer pages, so that we can directly get the data
-		 * (up-to-date) from memory.
-		 */
-		vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
-			return ret;
-		}
-
-		guc->log.buf_addr = vaddr;
-	}
-
-	if (!guc->log.relay_chan) {
-		/* Create a relay channel, so that we have buffers for storing
-		 * the GuC firmware logs, the channel will be linked with a file
-		 * later on when debugfs is registered.
-		 */
-		ret = guc_log_create_relay_channel(guc);
-		if (ret)
-			return ret;
-	}
-
-	if (!guc->log.flush_wq) {
-		INIT_WORK(&guc->log.flush_work, capture_logs_work);
-
-		/*
-		 * GuC log buffer flush work item has to do register access to
-		 * send the ack to GuC and this work item, if not synced before
-		 * suspend, can potentially get executed after the GFX device is
-		 * suspended.
-		 * By marking the WQ as freezable, we don't have to bother about
-		 * flushing of this work item from the suspend hooks, the pending
-		 * work item if any will be either executed before the suspend
-		 * or scheduled later on resume. This way the handling of work
-		 * item can be kept same between system suspend & rpm suspend.
-		 */
-		guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
-							    WQ_HIGHPRI | WQ_FREEZABLE);
-		if (guc->log.flush_wq == NULL) {
-			DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
-			return -ENOMEM;
-		}
-	}
-
-	return 0;
-}
-
-void intel_guc_log_create(struct intel_guc *guc)
-{
-	struct i915_vma *vma;
-	unsigned long offset;
-	uint32_t size, flags;
-
-	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
-		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
-	/* The first page is to save log buffer state. Allocate one
-	 * extra page for others in case for overlap */
-	size = (1 + GUC_LOG_DPC_PAGES + 1 +
-		GUC_LOG_ISR_PAGES + 1 +
-		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
-	vma = guc->log.vma;
-	if (!vma) {
-		/* We require SSE 4.1 for fast reads from the GuC log buffer and
-		 * it should be present on the chipsets supporting GuC based
-		 * submisssions.
-		 */
-		if (WARN_ON(!i915_has_memcpy_from_wc())) {
-			/* logging will not be enabled */
-			i915.guc_log_level = -1;
-			return;
-		}
-
-		vma = intel_guc_allocate_vma(guc, size);
-		if (IS_ERR(vma)) {
-			/* logging will be off */
-			i915.guc_log_level = -1;
-			return;
-		}
-
-		guc->log.vma = vma;
-
-		if (guc_log_create_extras(guc)) {
-			guc_log_cleanup(guc);
-			i915_vma_unpin_and_release(&guc->log.vma);
-			i915.guc_log_level = -1;
-			return;
-		}
-	}
-
-	/* each allocated unit is a page */
-	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
-		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
-		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
-		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
-	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
-	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+	GEM_BUG_ON(guc_log_has_extras(guc));
+
+	/* Create a WC (Uncached for read) vmalloc mapping of log
+	 * buffer pages, so that we can directly get the data
+	 * (up-to-date) from memory.
+	 */
+	vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+		return PTR_ERR(vaddr);
+	}
+
+	guc->log.buf_addr = vaddr;
+
+	/* Keep the size of sub buffers same as shared log buffer */
+	subbuf_size = guc->log.vma->obj->base.size;
+
+	/* Store up to 8 snapshots, which is large enough to buffer sufficient
+	 * boot time logs and provides enough leeway to User, in terms of
+	 * latency, for consuming the logs from relay. Also doesn't take
+	 * up too much memory.
+	 */
+	n_subbufs = 8;
+
+	/* Create a relay channel, so that we have buffers for storing
+	 * the GuC firmware logs, the channel will be linked with a file
+	 * later on when debugfs is registered.
+	 */
+	guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+					n_subbufs, &relay_callbacks, dev_priv);
+	if (!guc_log_relay_chan) {
+		DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+		ret = -ENOMEM;
+		goto err_vaddr;
+	}
+
+	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+	guc->log.relay_chan = guc_log_relay_chan;
+
+	INIT_WORK(&guc->log.flush_work, capture_logs_work);
+
+	/*
+	 * GuC log buffer flush work item has to do register access to
+	 * send the ack to GuC and this work item, if not synced before
+	 * suspend, can potentially get executed after the GFX device is
+	 * suspended.
+	 * By marking the WQ as freezable, we don't have to bother about
+	 * flushing of this work item from the suspend hooks, the pending
+	 * work item if any will be either executed before the suspend
+	 * or scheduled later on resume. This way the handling of work
+	 * item can be kept same between system suspend & rpm suspend.
+	 */
+	guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+						    WQ_HIGHPRI | WQ_FREEZABLE);
+	if (!guc->log.flush_wq) {
+		DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+		ret = -ENOMEM;
+		goto err_relaychan;
+	}
+
+	return 0;
+
+err_relaychan:
+	relay_close(guc->log.relay_chan);
+err_vaddr:
+	i915_gem_object_unpin_map(guc->log.vma->obj);
+	guc->log.buf_addr = NULL;
+	return ret;
+}
+
+static void guc_log_destroy_extras(struct intel_guc *guc)
+{
+	/*
+	 * It's possible that extras were never allocated because guc_log_level
+	 * was < 0 at the time
+	 */
+	if (!guc_log_has_extras(guc))
+		return;
+
+	destroy_workqueue(guc->log.flush_wq);
+	relay_close(guc->log.relay_chan);
+	i915_gem_object_unpin_map(guc->log.vma->obj);
+	guc->log.buf_addr = NULL;
 }
 
 static int guc_log_late_setup(struct intel_guc *guc)
@@ -527,24 +456,25 @@ static int guc_log_late_setup(struct intel_guc *guc)
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (i915.guc_log_level < 0)
-		return -EINVAL;
-
-	/* If log_level was set as -1 at boot time, then setup needed to
-	 * handle log buffer flush interrupts would not have been done yet,
-	 * so do that now.
-	 */
-	ret = guc_log_create_extras(guc);
-	if (ret)
-		goto err;
+	if (!guc_log_has_extras(guc)) {
+		/* If log_level was set as -1 at boot time, then setup needed to
+		 * handle log buffer flush interrupts would not have been done yet,
+		 * so do that now.
+		 */
+		ret = guc_log_create_extras(guc);
+		if (ret)
+			goto err;
+	}
 
-	ret = guc_log_create_relay_file(guc);
+	ret = guc_log_relay_file_create(guc);
 	if (ret)
-		goto err;
+		goto err_extras;
 
 	return 0;
 
+err_extras:
+	guc_log_destroy_extras(guc);
 err:
-	guc_log_cleanup(guc);
 	/* logging will remain off */
 	i915.guc_log_level = -1;
 	return ret;
@@ -586,6 +516,72 @@ static void guc_flush_logs(struct intel_guc *guc)
 	guc_log_capture_logs(guc);
 }
 
+int intel_guc_log_create(struct intel_guc *guc)
+{
+	struct i915_vma *vma;
+	unsigned long offset;
+	uint32_t size, flags;
+	int ret;
+
+	GEM_BUG_ON(guc->log.vma);
+
+	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+	/* The first page is to save log buffer state. Allocate one
+	 * extra page for others in case for overlap */
+	size = (1 + GUC_LOG_DPC_PAGES + 1 +
+		GUC_LOG_ISR_PAGES + 1 +
+		GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+	/* We require SSE 4.1 for fast reads from the GuC log buffer and
+	 * it should be present on the chipsets supporting GuC based
+	 * submisssions.
+	 */
+	if (WARN_ON(!i915_has_memcpy_from_wc())) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	vma = intel_guc_allocate_vma(guc, size);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto err;
+	}
+
+	guc->log.vma = vma;
+
+	if (i915.guc_log_level >= 0) {
+		ret = guc_log_create_extras(guc);
+		if (ret < 0)
+			goto err_vma;
+	}
+
+	/* each allocated unit is a page */
+	flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+		(GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+		(GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+		(GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+	offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+	guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+
+	return 0;
+
+err_vma:
+	i915_vma_unpin_and_release(&guc->log.vma);
+err:
+	/* logging will be off */
+	i915.guc_log_level = -1;
+	return ret;
+}
+
+void intel_guc_log_destroy(struct intel_guc *guc)
+{
+	guc_log_destroy_extras(guc);
+	i915_vma_unpin_and_release(&guc->log.vma);
+}
+
 int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 {
 	struct intel_guc *guc = &dev_priv->guc;
@@ -609,17 +605,22 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 		return ret;
 	}
 
-	i915.guc_log_level = log_param.verbosity;
+	if (log_param.logging_enabled) {
+		i915.guc_log_level = log_param.verbosity;
 
-	/* If log_level was set as -1 at boot time, then the relay channel file
-	 * wouldn't have been created by now and interrupts also would not have
-	 * been enabled.
-	 */
-	if (!dev_priv->guc.log.relay_chan) {
+		/* If log_level was set as -1 at boot time, then the relay channel file
+		 * wouldn't have been created by now and interrupts also would not have
+		 * been enabled. Try again now, just in case.
+		 */
 		ret = guc_log_late_setup(guc);
-		if (!ret)
-			gen9_enable_guc_interrupts(dev_priv);
-	} else if (!log_param.logging_enabled) {
+		if (ret < 0) {
+			DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
+			return ret;
+		}
+
+		/* GuC logging is currently the only user of Guc2Host interrupts */
+		gen9_enable_guc_interrupts(dev_priv);
+	} else {
 		/* Once logging is disabled, GuC won't generate logs & send an
 		 * interrupt. But there could be some data in the log buffer
 		 * which is yet to be captured. So request GuC to update the log
@@ -629,9 +630,6 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 		/* As logging is disabled, update log level to reflect that */
 		i915.guc_log_level = -1;
-	} else {
-		/* In case interrupts were disabled, enable them now */
-		gen9_enable_guc_interrupts(dev_priv);
 	}
 
 	return ret;
@@ -639,7 +637,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 
 void i915_guc_log_register(struct drm_i915_private *dev_priv)
 {
-	if (!i915.enable_guc_submission)
+	if (!i915.enable_guc_submission || i915.guc_log_level < 0)
 		return;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -653,6 +651,8 @@ void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
 		return;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
-	guc_log_cleanup(&dev_priv->guc);
+	/* GuC logging is currently the only user of Guc2Host interrupts */
+	gen9_disable_guc_interrupts(dev_priv);
+	guc_log_destroy_extras(&dev_priv->guc);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
@@ -95,24 +95,41 @@ void intel_uc_init_fw(struct drm_i915_private *dev_priv)
 	intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
 }
 
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
+{
+	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+	struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+	struct drm_i915_gem_object *obj;
+
+	obj = fetch_and_zero(&guc_fw->obj);
+	if (obj)
+		i915_gem_object_put(obj);
+
+	guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+
+	obj = fetch_and_zero(&huc_fw->obj);
+	if (obj)
+		i915_gem_object_put(obj);
+
+	huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
 int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 {
 	int ret, attempts;
 
-	/* GuC not enabled, nothing to do */
-	if (!i915.enable_guc_loading)
-		return 0;
-
 	gen9_reset_guc_interrupts(dev_priv);
 
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
 
-	if (i915.enable_guc_submission) {
-		ret = i915_guc_submission_init(dev_priv);
-		if (ret)
-			goto err;
-	}
+	/*
+	 * This is stuff we need to have available at fw load time
+	 * if we are planning to enable submission later
+	 */
+	ret = i915_guc_submission_init(dev_priv);
+	if (ret)
+		goto err_guc;
 
 	/* WaEnableuKernelHeaderValidFix:skl */
 	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
@@ -150,7 +167,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 		ret = i915_guc_submission_enable(dev_priv);
 		if (ret)
-			goto err_submission;
+			goto err_interrupts;
 	}
 
 	return 0;
@@ -164,11 +181,11 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	 * nonfatal error (i.e. it doesn't prevent driver load, but
 	 * marks the GPU as wedged until reset).
 	 */
+err_interrupts:
+	gen9_disable_guc_interrupts(dev_priv);
 err_submission:
-	if (i915.enable_guc_submission)
-		i915_guc_submission_fini(dev_priv);
-err:
+	i915_guc_submission_fini(dev_priv);
+err_guc:
 	i915_ggtt_disable_guc(dev_priv);
 	DRM_ERROR("GuC init failed\n");
@@ -185,6 +202,16 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+{
+	if (i915.enable_guc_submission) {
+		i915_guc_submission_disable(dev_priv);
+		gen9_disable_guc_interrupts(dev_priv);
+	}
+
+	i915_guc_submission_fini(dev_priv);
+	i915_ggtt_disable_guc(dev_priv);
+}
+
 /*
  * Read GuC command/status register (SOFT_SCRATCH_0)
  * Return true if it contains a response rather than a command
...
@@ -187,7 +187,9 @@ struct intel_huc {
 void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
 void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
 int intel_uc_init_hw(struct drm_i915_private *dev_priv);
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
 void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
 			 struct intel_uc_fw *uc_fw);
 int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
@@ -196,7 +198,6 @@ int intel_guc_sample_forcewake(struct intel_guc *guc);
 /* intel_guc_loader.c */
 int intel_guc_select_fw(struct intel_guc *guc);
 int intel_guc_init_hw(struct intel_guc *guc);
-void intel_guc_fini(struct drm_i915_private *dev_priv);
 const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
 int intel_guc_suspend(struct drm_i915_private *dev_priv);
 int intel_guc_resume(struct drm_i915_private *dev_priv);
@@ -212,10 +213,11 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
 
 /* intel_guc_log.c */
-void intel_guc_log_create(struct intel_guc *guc);
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
 void i915_guc_log_register(struct drm_i915_private *dev_priv);
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
 
 static inline u32 guc_ggtt_offset(struct i915_vma *vma)
 {
...