Commit b4519513 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Introduce for_each_ring() macro

In many places we wish to iterate over the rings associated with the
GPU, so refactor them to use a common macro.

Along the way, there are a few code removals that should be side-effect
free and some rearrangement which should only have a cosmetic impact,
such as error-state.

Note that this slightly changes the semantics in the hangcheck code:
We now always cycle through all enabled rings instead of
short-circuiting the logic.

v2: Pull in a couple of suggestions from Ben and Daniel for
intel_ring_initialized() and not removing the warning (just moving them
to a new home, closer to the error).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Added note to commit message about the small behaviour
change, suggested by Ben Widawsky.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent e7e164db
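
The heart of the change is the for_each_ring() iterator added to i915_drv.h in the hunks below. As a reading aid, here is a minimal standalone sketch of the pattern; the structures are stripped down to the one field the iterator actually needs, so this only illustrates the macro's behaviour and is not the driver code itself:

/*
 * Standalone sketch of the for_each_ring() pattern introduced by this
 * patch. The comma operator assigns ring__ on every iteration and the
 * trailing if() skips rings whose backing object was never allocated.
 */
#include <stdio.h>
#include <stdbool.h>

#define I915_NUM_RINGS 3

struct intel_ring_buffer {
	void *obj;	/* backing object; NULL means the ring is not initialised */
};

struct drm_i915_private {
	struct intel_ring_buffer ring[I915_NUM_RINGS];
};

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

int main(void)
{
	struct drm_i915_private dev_priv = { 0 };
	struct intel_ring_buffer *ring;
	int dummy, i;

	/* Pretend only rings 0 and 2 were set up (e.g. no BSD ring). */
	dev_priv.ring[0].obj = &dummy;
	dev_priv.ring[2].obj = &dummy;

	for_each_ring(ring, &dev_priv, i)
		printf("ring %d is initialised\n", i);	/* prints 0 and 2 only */

	return 0;
}
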
@@ -699,6 +699,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
 	struct drm_device *dev = error_priv->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_state *error = error_priv->error;
+	struct intel_ring_buffer *ring;
 	int i, j, page, offset, elt;
 
 	if (!error) {
@@ -706,7 +707,6 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		return 0;
 	}
 
-
 	seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
@@ -722,11 +722,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
 		seq_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
 	}
 
-	i915_ring_error_state(m, dev, error, RCS);
-	if (HAS_BLT(dev))
-		i915_ring_error_state(m, dev, error, BCS);
-	if (HAS_BSD(dev))
-		i915_ring_error_state(m, dev, error, VCS);
+	for_each_ring(ring, dev_priv, i)
+		i915_ring_error_state(m, dev, error, i);
 
 	if (error->active_bo)
 		print_error_buffers(m, "Active",
......
@@ -893,15 +893,15 @@ int i915_reset(struct drm_device *dev)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
+		struct intel_ring_buffer *ring;
+		int i;
+
 		dev_priv->mm.suspended = 0;
 
 		i915_gem_init_swizzling(dev);
 
-		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
-		if (HAS_BSD(dev))
-			dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
-		if (HAS_BLT(dev))
-			dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+		for_each_ring(ring, dev_priv, i)
+			ring->init(ring);
 
 		i915_gem_init_ppgtt(dev);
......
@@ -410,9 +410,7 @@ typedef struct drm_i915_private {
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
-	uint32_t last_acthd;
-	uint32_t last_acthd_bsd;
-	uint32_t last_acthd_blt;
+	uint32_t last_acthd[I915_NUM_RINGS];
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
@@ -820,6 +818,11 @@ typedef struct drm_i915_private {
 	struct drm_property *force_audio_property;
 } drm_i915_private_t;
 
+/* Iterate over initialised rings */
+#define for_each_ring(ring__, dev_priv__, i__) \
+	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
+		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
+
 enum hdmi_force_audio {
 	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
 	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
......
@@ -1655,10 +1655,11 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_lists(dev_priv, ring);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
@@ -1763,10 +1764,11 @@ void
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_retire_requests_ring(ring);
 }
 
 static void
@@ -1774,6 +1776,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	struct intel_ring_buffer *ring;
 	bool idle;
 	int i;
@@ -1793,9 +1796,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	 * objects indefinitely.
 	 */
 	idle = true;
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		if (!list_empty(&ring->gpu_write_list)) {
 			struct drm_i915_gem_request *request;
 			int ret;
@@ -2137,13 +2138,18 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret, i;
 
 	/* Flush everything onto the inactive list. */
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ret = i915_ring_idle(&dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i) {
+		ret = i915_ring_idle(ring);
 		if (ret)
 			return ret;
+
+		/* Is the device fubar? */
+		if (WARN_ON(!list_empty(&ring->gpu_write_list)))
+			return -EBUSY;
 	}
 
 	return 0;
@@ -3463,9 +3469,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 		/* GFX_MODE is per-ring on gen7+ */
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		ring = &dev_priv->ring[i];
-
+	for_each_ring(ring, dev_priv, i) {
 		if (INTEL_INFO(dev)->gen >= 7)
 			I915_WRITE(RING_MODE_GEN7(ring),
 				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -3581,10 +3585,11 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int i;
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
+	for_each_ring(ring, dev_priv, i)
+		intel_cleanup_ring_buffer(ring);
 }
 
 int
@@ -3592,7 +3597,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, i;
+	int ret;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -3614,10 +3619,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
-		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
-	}
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
......
@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
 	bool lists_empty;
-	int ret,i;
+	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
@@ -178,17 +178,13 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	trace_i915_gem_evict_everything(dev, purgeable_only);
 
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
 	 * with retire requests.
 	 */
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
-			return -EBUSY;
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
 
 	i915_gem_retire_requests(dev);
@@ -203,5 +199,5 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 		}
 	}
 
-	return ret;
+	return 0;
 }
@@ -1022,15 +1022,11 @@ static void i915_gem_record_rings(struct drm_device *dev,
 				  struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	struct drm_i915_gem_request *request;
 	int i, count;
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_ring_buffer *ring = &dev_priv->ring[i];
-
-		if (ring->obj == NULL)
-			continue;
-
+	for_each_ring(ring, dev_priv, i) {
 		i915_record_ring_state(dev, error, ring);
 
 		error->ring[i].batchbuffer =
@@ -1295,6 +1291,8 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int i;
 
 	i915_capture_error_state(dev);
 	i915_report_and_clear_eir(dev);
@@ -1306,11 +1304,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->ring[RCS].irq_queue);
-		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->ring[VCS].irq_queue);
-		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->ring[BCS].irq_queue);
+		for_each_ring(ring, dev_priv, i)
+			wake_up_all(&ring->irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -1515,11 +1510,6 @@ ring_last_seqno(struct intel_ring_buffer *ring)
 
 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
 {
-	/* We don't check whether the ring even exists before calling this
-	 * function. Hence check whether it's initialized. */
-	if (ring->obj == NULL)
-		return true;
-
 	if (list_empty(&ring->request_list) ||
 	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
 		/* Issue a wake-up to catch stuck h/w. */
@@ -1553,26 +1543,25 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (dev_priv->hangcheck_count++ > 1) {
+		bool hung = true;
+
 		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
 		i915_handle_error(dev, true);
 
 		if (!IS_GEN2(dev)) {
+			struct intel_ring_buffer *ring;
+			int i;
+
 			/* Is the chip hanging on a WAIT_FOR_EVENT?
 			 * If so we can simply poke the RB_WAIT bit
 			 * and break the hang. This should work on
 			 * all but the second generation chipsets.
 			 */
-			if (kick_ring(&dev_priv->ring[RCS]))
-				return false;
-
-			if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
-				return false;
-
-			if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
-				return false;
+			for_each_ring(ring, dev_priv, i)
+				hung &= !kick_ring(ring);
 		}
 
-		return true;
+		return hung;
 	}
 
 	return false;
@@ -1588,16 +1577,23 @@ void i915_hangcheck_elapsed(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
-	bool err = false;
+	uint32_t acthd[I915_NUM_RINGS], instdone, instdone1;
+	struct intel_ring_buffer *ring;
+	bool err = false, idle;
+	int i;
 
 	if (!i915_enable_hangcheck)
 		return;
 
+	memset(acthd, 0, sizeof(acthd));
+	idle = true;
+	for_each_ring(ring, dev_priv, i) {
+		idle &= i915_hangcheck_ring_idle(ring, &err);
+		acthd[i] = intel_ring_get_active_head(ring);
+	}
+
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
-	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+	if (idle) {
 		if (err) {
 			if (i915_hangcheck_hung(dev))
 				return;
@@ -1616,15 +1612,8 @@ void i915_hangcheck_elapsed(unsigned long data)
 		instdone = I915_READ(INSTDONE_I965);
 		instdone1 = I915_READ(INSTDONE1);
 	}
-	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
-	acthd_bsd = HAS_BSD(dev) ?
-		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
-	acthd_blt = HAS_BLT(dev) ?
-		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
 
-	if (dev_priv->last_acthd == acthd &&
-	    dev_priv->last_acthd_bsd == acthd_bsd &&
-	    dev_priv->last_acthd_blt == acthd_blt &&
+	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
 		if (i915_hangcheck_hung(dev))
@@ -1632,9 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 	} else {
 		dev_priv->hangcheck_count = 0;
 
-		dev_priv->last_acthd = acthd;
-		dev_priv->last_acthd_bsd = acthd_bsd;
-		dev_priv->last_acthd_blt = acthd_blt;
+		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
 		dev_priv->last_instdone = instdone;
 		dev_priv->last_instdone1 = instdone1;
 	}
......
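
The commit message's note about no longer short-circuiting refers to the hangcheck hunks above: i915_hangcheck_hung() now visits every enabled ring and folds the per-ring results into one flag instead of returning as soon as a single ring can be kicked. A reduced standalone sketch of that accumulation follows; kick_ring() here is a stand-in that simply reports the value it is given, not the driver function:

#include <stdio.h>
#include <stdbool.h>

#define NUM_RINGS 3

/* Stand-in for kick_ring(): returns true when the ring could be kicked,
 * i.e. it was merely stuck on a WAIT_FOR_EVENT rather than truly hung. */
static bool kick_ring(int ring, bool kickable)
{
	printf("kicking ring %d\n", ring);	/* runs for every ring - no early return */
	return kickable;
}

int main(void)
{
	/* Pretend ring 1 cannot be kicked but rings 0 and 2 can. */
	bool kickable[NUM_RINGS] = { true, false, true };
	bool hung = true;
	int i;

	for (i = 0; i < NUM_RINGS; i++)
		hung &= !kick_ring(i, kickable[i]);

	/* hung stays true only if no ring at all could be kicked; the final
	 * verdict matches the old short-circuit logic, but every enabled ring
	 * still receives its kick attempt. */
	printf("GPU hung: %s\n", hung ? "yes" : "no");
	return 0;
}
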
@@ -2326,6 +2326,7 @@ int intel_enable_rc6(const struct drm_device *dev)
 
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
+	struct intel_ring_buffer *ring;
 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 	u32 pcu_mbox, rc6_mask = 0;
@@ -2360,8 +2361,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
-		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+	for_each_ring(ring, dev_priv, i)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
......
@@ -119,6 +119,12 @@ struct intel_ring_buffer {
 	void *private;
 };
 
+static inline bool
+intel_ring_initialized(struct intel_ring_buffer *ring)
+{
+	return ring->obj != NULL;
+}
+
 static inline unsigned
 intel_ring_flag(struct intel_ring_buffer *ring)
 {
......