Commit 244dc268 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Back from LCA2020, fixes wasn't too busy last week, seems to have
  quieten down appropriately, some amdgpu, i915, then a core mst fix and
  one fix for virtio-gpu and one for rockchip:

  core mst:
   - serialize down messages and clear time slots on unplug

  amdgpu:
   - Update golden settings for renoir
   - eDP fix

  i915:
   - uAPI fix: Remove dash and colon from PMU names to comply with
     tools/perf
   - Fix for include file that was indirectly included
   - Two fixes to make sure VMAs are marked active for error capture

  virtio:
   - maintain obj reservation lock when submitting cmds

  rockchip:
   - increase link rate var size to accommodate rates"

* tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Reorder detect_edp_sink_caps before link settings read.
  drm/amdgpu: update goldensetting for renoir
  drm/dp_mst: Have DP_Tx send one msg at a time
  drm/dp_mst: clear time slots for ports invalid
  drm/i915/pmu: Do not use colons or dashes in PMU names
  drm/rockchip: fix integer type used for storing dp data rate
  drm/i915/gt: Mark ring->vma as active while pinned
  drm/i915/gt: Mark context->state vma as active while pinned
  drm/i915/gt: Skip trying to unbind in restore_ggtt_mappings
  drm/i915: Add missing include file <linux/math64.h>
  drm/virtio: add missing virtio_gpu_array_lock_resv call
......
@@ -254,7 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
 	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
-	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+	SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
......
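How a golden-settings entry takes effect: at init, soc15_program_register_sequence() walks this table and performs a masked read-modify-write of each register. The sketch below models that arithmetic in plain user-space C; the helper name is hypothetical and the exact masking policy is an assumption read off the (and_mask, or_mask) macro arguments, not the kernel implementation.

	#include <stdint.h>
	#include <stdio.h>

	/* Model of applying one SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask,
	 * or_mask) entry: clear the bits selected by and_mask, then OR in the
	 * golden value. (Hypothetical helper; not the kernel function.) */
	static uint32_t apply_golden(uint32_t reg_val, uint32_t and_mask, uint32_t or_mask)
	{
		return (reg_val & ~and_mask) | or_mask;
	}

	int main(void)
	{
		/* mmSDMA0_UTCL1_WATERMK from the hunk above: same mask, but the
		 * golden value changes from 0x00000000 to 0x03fbe1fe. */
		printf("old: 0x%08x\n", apply_golden(0xdeadbeef, 0xfc000000, 0x00000000));
		printf("new: 0x%08x\n", apply_golden(0xdeadbeef, 0xfc000000, 0x03fbe1fe));
		return 0;
	}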
......
@@ -817,8 +817,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
 	}
 	case SIGNAL_TYPE_EDP: {
-		read_current_link_settings_on_detect(link);
 		detect_edp_sink_caps(link);
+		read_current_link_settings_on_detect(link);
 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
 		sink_caps.signal = SIGNAL_TYPE_EDP;
 		break;
......
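The reorder matters because detect_edp_sink_caps() is what populates the sink's DPCD capabilities (on eDP 1.4+ that includes the supported link-rate table), and read_current_link_settings_on_detect() interprets the currently-trained link against those caps; reading the settings first meant consuming caps that had not been filled in yet. (Rationale inferred from the function names and the commit title; the hunk itself only shows the swap.)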
......
@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
 			mstb->tx_slots[txmsg->seqno] = NULL;
 		}
+		mgr->is_waiting_for_dwn_reply = false;
+
 	}
 out:
 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
......
@@ -1199,6 +1201,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
 	}
 	mutex_unlock(&mgr->qlock);
 
+	drm_dp_mst_kick_tx(mgr);
 	return ret;
 }
......
@@ -2318,7 +2321,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 {
 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
 	struct drm_dp_mst_port *port;
-	int old_ddps, ret;
+	int old_ddps, old_input, ret, i;
 	u8 new_pdt;
 	bool dowork = false, create_connector = false;
......
@@ -2349,6 +2352,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	}
 
 	old_ddps = port->ddps;
+	old_input = port->input;
 	port->input = conn_stat->input_port;
 	port->mcs = conn_stat->message_capability_status;
 	port->ldps = conn_stat->legacy_device_plug_status;
......
@@ -2373,6 +2377,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 		dowork = false;
 	}
 
+	if (!old_input && old_ddps != port->ddps && !port->ddps) {
+		for (i = 0; i < mgr->max_payloads; i++) {
+			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+			struct drm_dp_mst_port *port_validated;
+
+			if (!vcpi)
+				continue;
+
+			port_validated =
+				container_of(vcpi, struct drm_dp_mst_port, vcpi);
+			port_validated =
+				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+			if (!port_validated) {
+				mutex_lock(&mgr->payload_lock);
+				vcpi->num_slots = 0;
+				mutex_unlock(&mgr->payload_lock);
+			} else {
+				drm_dp_mst_topology_put_port(port_validated);
+			}
+		}
+	}
+
 	if (port->connector)
 		drm_modeset_unlock(&mgr->base.lock);
 	else if (create_connector)
......
@@ -2718,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 	ret = process_single_tx_qlock(mgr, txmsg, false);
 	if (ret == 1) {
 		/* txmsg is sent it should be in the slots now */
+		mgr->is_waiting_for_dwn_reply = true;
 		list_del(&txmsg->next);
 	} else if (ret) {
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+		mgr->is_waiting_for_dwn_reply = false;
 		list_del(&txmsg->next);
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
......
@@ -2760,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
 	}
 
-	if (list_is_singular(&mgr->tx_msg_downq))
+	if (list_is_singular(&mgr->tx_msg_downq) &&
+	    !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
......
@@ -3678,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->qlock);
 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
 	mstb->tx_slots[slot] = NULL;
+	mgr->is_waiting_for_dwn_reply = false;
 	mutex_unlock(&mgr->qlock);
 
 	wake_up_all(&mgr->tx_waitq);
......
@@ -3687,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 no_msg:
 	drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+	mutex_lock(&mgr->qlock);
+	mgr->is_waiting_for_dwn_reply = false;
+	mutex_unlock(&mgr->qlock);
 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 
 	return 0;
......
@@ -4497,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work)
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
 
 	mutex_lock(&mgr->qlock);
-	if (!list_empty(&mgr->tx_msg_downq))
+	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
 		process_single_down_tx_qlock(mgr);
 	mutex_unlock(&mgr->qlock);
 }
......
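Taken together, these hunks serialize sideband traffic: a down message goes on the wire only when no reply is outstanding, and every completion path (reply received, timeout, send failure) clears the flag and kicks the worker. Below is a minimal user-space model of that discipline; the names mirror the diff, but the bodies are simplified stand-ins, not the kernel code.

	#include <pthread.h>
	#include <stdbool.h>

	struct mgr {
		pthread_mutex_t qlock;
		bool is_waiting_for_dwn_reply;
		int tx_msg_downq;              /* stand-in: count of queued msgs */
	};

	static void process_single_down_tx_qlock(struct mgr *m)
	{
		/* cf. the hunk at @@ -2718: sending flips the flag on. */
		m->tx_msg_downq--;
		m->is_waiting_for_dwn_reply = true;
	}

	/* cf. drm_dp_tx_work()/drm_dp_queue_down_tx(): only transmit when no
	 * reply is pending. */
	static void tx_work(struct mgr *m)
	{
		pthread_mutex_lock(&m->qlock);
		if (m->tx_msg_downq && !m->is_waiting_for_dwn_reply)
			process_single_down_tx_qlock(m);
		pthread_mutex_unlock(&m->qlock);
	}

	/* cf. drm_dp_mst_handle_down_rep(): a reply (or a failure path) clears
	 * the flag, then the worker is kicked to send the next queued message. */
	static void handle_down_rep(struct mgr *m)
	{
		pthread_mutex_lock(&m->qlock);
		m->is_waiting_for_dwn_reply = false;
		pthread_mutex_unlock(&m->qlock);
		tx_work(m);                    /* cf. drm_dp_mst_kick_tx() */
	}

	int main(void)
	{
		struct mgr m = { PTHREAD_MUTEX_INITIALIZER, false, 2 };

		tx_work(&m);          /* sends msg 1, now waiting */
		tx_work(&m);          /* no-op: reply still outstanding */
		handle_down_rep(&m);  /* reply arrives, msg 2 goes out */
		return 0;
	}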
......
@@ -123,6 +123,10 @@ static int __context_pin_state(struct i915_vma *vma)
 	if (err)
 		return err;
 
+	err = i915_active_acquire(&vma->active);
+	if (err)
+		goto err_unpin;
+
 	/*
 	 * And mark it as a globally pinned object to let the shrinker know
 	 * it cannot reclaim the object until we release it.
......
@@ -131,14 +135,44 @@ static int __context_pin_state(struct i915_vma *vma)
 	vma->obj->mm.dirty = true;
 
 	return 0;
+
+err_unpin:
+	i915_vma_unpin(vma);
+	return err;
 }
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
 	i915_vma_make_shrinkable(vma);
+	i915_active_release(&vma->active);
 	__i915_vma_unpin(vma);
 }
+
+static int __ring_active(struct intel_ring *ring)
+{
+	int err;
+
+	err = i915_active_acquire(&ring->vma->active);
+	if (err)
+		return err;
+
+	err = intel_ring_pin(ring);
+	if (err)
+		goto err_active;
+
+	return 0;
+
+err_active:
+	i915_active_release(&ring->vma->active);
+	return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+	intel_ring_unpin(ring);
+	i915_active_release(&ring->vma->active);
+}
+
 __i915_active_call
 static void __intel_context_retire(struct i915_active *active)
 {
......
@@ -151,7 +185,7 @@ static void __intel_context_retire(struct i915_active *active)
 	__context_unpin_state(ce->state);
 
 	intel_timeline_unpin(ce->timeline);
-	intel_ring_unpin(ce->ring);
+	__ring_retire(ce->ring);
 
 	intel_context_put(ce);
 }
......
@@ -163,7 +197,7 @@ static int __intel_context_active(struct i915_active *active)
 	intel_context_get(ce);
 
-	err = intel_ring_pin(ce->ring);
+	err = __ring_active(ce->ring);
 	if (err)
 		goto err_put;
......
@@ -183,7 +217,7 @@ static int __intel_context_active(struct i915_active *active)
 err_timeline:
 	intel_timeline_unpin(ce->timeline);
 err_ring:
-	intel_ring_unpin(ce->ring);
+	__ring_retire(ce->ring);
 err_put:
 	intel_context_put(ce);
 	return err;
......
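Both pairs of helpers keep pinning and active-tracking in lockstep: __context_pin_state() now acquires vma->active right after pinning (unwinding the pin through the new err_unpin label on failure) and __context_unpin_state() releases it, while __ring_active()/__ring_retire() do the same for ring->vma. Per the commit titles, the point is that a pinned context state or ring is always visible as active to the error-capture code.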
......
@@ -3304,7 +3304,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-	struct i915_vma *vma, *vn;
+	struct i915_vma *vma;
 	bool flush = false;
 	int open;
......
@@ -3319,15 +3319,12 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 	open = atomic_xchg(&ggtt->vm.open, 0);
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
 			continue;
 
-		if (!__i915_vma_unbind(vma))
-			continue;
-
 		clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
 		WARN_ON(i915_vma_bind(vma,
 				      obj ? obj->cache_level : 0,
......
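Since the loop no longer calls __i915_vma_unbind(), nothing is removed from ggtt->vm.bound_list during the walk, so the _safe iterator and its vn cursor become unnecessary: on resume, each still-bound global VMA simply has its GLOBAL_BIND bit cleared and is rebound in place by the i915_vma_bind() call below.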
......
@@ -1074,12 +1074,17 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pmu->timer.function = i915_sample;
 
-	if (!is_igp(i915))
+	if (!is_igp(i915)) {
 		pmu->name = kasprintf(GFP_KERNEL,
-				      "i915-%s",
+				      "i915_%s",
 				      dev_name(i915->drm.dev));
-	else
+		if (pmu->name) {
+			/* tools/perf reserves colons as special. */
+			strreplace((char *)pmu->name, ':', '_');
+		}
+	} else {
 		pmu->name = "i915";
+	}
 
 	if (!pmu->name)
 		goto err;
......
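With this change, the PMU for a discrete card registers under a name like i915_0000_03_00.0 rather than i915-0000:03:00.0 (the PCI address here is illustrative): colons are reserved by tools/perf for event modifiers (as in cycles:u) and dashes also confuse its event parser, so after the fix the counters can be addressed as, hypothetically, perf stat -e 'i915_0000_03_00.0/<event>/'.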
......
@@ -25,6 +25,7 @@
 #ifndef __I915_SELFTESTS_RANDOM_H__
 #define __I915_SELFTESTS_RANDOM_H__
 
+#include <linux/math64.h>
 #include <linux/random.h>
 
 #include "../i915_selftest.h"
......
......
@@ -95,7 +95,7 @@ struct cdn_dp_device {
 	struct cdn_dp_port *port[MAX_PHY];
 	u8 ports;
 	u8 max_lanes;
-	u8 max_rate;
+	unsigned int max_rate;
 	u8 lanes;
 	int active_port;
......
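The u8 was silently truncating the DP link rate: the DRM DP helpers hand back rates in kHz (e.g. 540000 for HBR2, going by drm_dp_bw_code_to_link_rate()), which is far beyond 8 bits. A trivial standalone illustration of the truncation:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t rate_khz = 540000;            /* HBR2: 5.4 Gb/s per lane */
		uint8_t truncated = (uint8_t)rate_khz; /* what the old u8 field kept */

		/* Prints: 540000 kHz stored in a u8 becomes 96 */
		printf("%" PRIu32 " kHz stored in a u8 becomes %u\n",
		       rate_khz, (unsigned)truncated);
		return 0;
	}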
......
@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
 		if (!objs)
 			return;
 		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+		virtio_gpu_array_lock_resv(objs);
 		virtio_gpu_cmd_transfer_to_host_2d
 			(vgdev, 0,
 			 plane->state->crtc_w,
......
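The cursor path was queuing the transfer command, which ends up attaching a fence to the BO's dma-resv object, without holding the reservation lock; virtio_gpu_array_lock_resv() takes that lock for every object in the array before submission. (The fencing detail is inferred from the helper's name and the summary "maintain obj reservation lock when submitting cmds"; the hunk itself only shows the added call.)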
......
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
 	 * &drm_dp_sideband_msg_tx.state once they are queued
 	 */
 	struct mutex qlock;
+
+	/**
+	 * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+	 */
+	bool is_waiting_for_dwn_reply;
+
 	/**
 	 * @tx_msg_downq: List of pending down replies.
 	 */
......