Commit 08b022a9 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Xmas fixes pull:

  core:
      one atomic fix, revert the WARN_ON dumb buffers patch.

  agp:
      fixup Dave J.

  nouveau:
      fix 3.18 regression for old userspace

  tegra fixes:
      vblank and iommu fixes

  amdkfd:
      fix bugs shown by testing with userspace, init apertures once

  msm:
      hdmi fixes and cleanup

  i915:
      misc fixes

  There is also a link ordering fix that I've asked to be cc'ed to you,
  putting iommu before gpu, it fixes an issue with amdkfd when things
  are all in the kernel, but I didn't like sending it via my tree
  without discussion.

  I'll probably be a bit on/off for a few weeks with pulls now, due to
  holidays and LCA, so don't be surprised if stuff gets a bit backed up,
  and things end up a bit large due to lag"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (28 commits)
  Revert "drm/gem: Warn on illegal use of the dumb buffer interface v2"
  agp: Fix up email address & attributions in AGP MODULE_AUTHOR tags
  nouveau: bring back legacy mmap handler
  drm/msm/hdmi: rework HDMI IRQ handler
  drm/msm/hdmi: enable regulators before clocks to avoid warnings
  drm/msm/mdp5: update irqs on crtc<->encoder link change
  drm/msm: block incoming update on pending updates
  drm/atomic: fix potential null ptr on plane enable
  drm/msm: Deletion of unnecessary checks before the function call "release_firmware"
  drm/msm: Deletion of unnecessary checks before two function calls
  drm/tegra: dc: Select root window for event dispatch
  drm/tegra: gem: Use the proper size for GEM objects
  drm/tegra: gem: Flush buffer objects upon allocation
  drm/tegra: dc: Fix a potential race on page-flip completion
  drm/tegra: dc: Consistently use the same pipe
  drm/irq: Add drm_crtc_vblank_count()
  drm/irq: Add drm_crtc_handle_vblank()
  drm/irq: Add drm_crtc_send_vblank_event()
  drm/i915: Disable PSMI sleep messages on all rings around context switches
  drm/i915: Force the CS stall for invalidate flushes
  ...
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
 module_init(agp_ali_init);
 module_exit(agp_ali_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
 module_init(agp_amd64_mod_init);
 module_exit(agp_amd64_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
 module_param(agp_try_unsupported, bool, 0);
 MODULE_LICENSE("GPL");
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
 module_init(agp_ati_init);
 module_exit(agp_ati_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
 __setup("agp=", agp_setup);
 #endif
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
 MODULE_DESCRIPTION("AGP GART driver");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
 module_init(agp_intel_init);
 module_exit(agp_intel_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
 }
 EXPORT_SYMBOL(intel_gmch_remove);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
 /*
  * Nvidia AGPGART routines.
  * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
  */
 #include <linux/module.h>
@@ -595,4 +595,4 @@ module_init(agp_via_init);
 module_exit(agp_via_cleanup);
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
@@ -121,13 +121,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
        if (IS_ERR(process))
                return PTR_ERR(process);

-       process->is_32bit_user_mode = is_32bit_user_mode;
-
        dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                process->pasid, process->is_32bit_user_mode);

-       kfd_init_apertures(process);
-
        return 0;
 }
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
        struct kfd_dev *dev;
        struct kfd_process_device *pdd;

-       mutex_lock(&process->mutex);
-
        /*Iterating over all devices*/
        while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
                id < NUM_OF_SUPPORTED_GPUS) {

                pdd = kfd_get_process_device_data(dev, process, 1);
+               if (!pdd)
+                       return -1;

                /*
                 * For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
                id++;
        }

-       mutex_unlock(&process->mutex);
-
        return 0;
 }
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/notifier.h>
+#include <linux/compat.h>
+
 struct mm_struct;

 #include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        if (err != 0)
                goto err_process_pqm_init;

+       /* init process apertures*/
+       process->is_32bit_user_mode = is_compat_task();
+       if (kfd_init_apertures(process) != 0)
+               goto err_init_apretures;
+
        return process;

+err_init_apretures:
+       pqm_uninit(&process->pqm);
 err_process_pqm_init:
        hash_del_rcu(&process->kfd_processes);
        synchronize_rcu();
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.simd_per_cu);
        sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
                        dev->node_props.max_slots_scratch_cu);
-       sysfs_show_32bit_prop(buffer, "engine_id",
-                       dev->node_props.engine_id);
        sysfs_show_32bit_prop(buffer, "vendor_id",
                        dev->node_props.vendor_id);
        sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                                dev->gpu->kgd));
                sysfs_show_64bit_prop(buffer, "local_mem_size",
                                kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+               sysfs_show_32bit_prop(buffer, "fw_version",
+                               kfd2kgd->get_fw_version(
+                                               dev->gpu->kgd,
+                                               KGD_ENGINE_MEC1));
+
        }

        ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
        KGD_POOL_FRAMEBUFFER = 3,
 };

+enum kgd_engine_type {
+       KGD_ENGINE_PFP = 1,
+       KGD_ENGINE_ME,
+       KGD_ENGINE_CE,
+       KGD_ENGINE_MEC1,
+       KGD_ENGINE_MEC2,
+       KGD_ENGINE_RLC,
+       KGD_ENGINE_SDMA,
+       KGD_ENGINE_MAX
+};
+
 struct kgd2kfd_shared_resources {
        /* Bit n == 1 means VMID n is available for KFD. */
        unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
  *
  * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
  *
+ * @get_fw_version: Returns FW versions from the header
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -176,6 +189,8 @@ struct kfd2kgd_calls {
        int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
                                unsigned int timeout, uint32_t pipe_id,
                                uint32_t queue_id);
+       uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+                               enum kgd_engine_type type);
 };

 bool kgd2kfd_init(unsigned interface_version,
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
        struct drm_crtc_state *crtc_state;

        if (plane->state->crtc) {
-               crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+               crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];

                if (WARN_ON(!crtc_state))
                        return;
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
  * vblank events since the system was booted, including lost events due to
  * modesetting activity.
  *
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
  * Returns:
  * The software vblank counter.
  */
@@ -843,6 +845,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_count);

+/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+       return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
 /**
  * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
  * and the system timestamp corresponding to that vblank counter value.
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
  * Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
  */
 void drm_send_vblank_event(struct drm_device *dev, int crtc,
                struct drm_pending_vblank_event *e)
@@ -922,6 +945,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_send_vblank_event);

+/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                               struct drm_pending_vblank_event *e)
+{
+       drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
  * @dev: DRM device
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
  */
 bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
        return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+       return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
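
The three drm_crtc_* additions above are thin convenience wrappers: a driver that already holds a struct drm_crtc no longer has to translate back to the (dev, pipe index) pair the legacy API takes, and the tegra patches later in this pull are the first users. As a rough illustration of the intended call pattern in a driver's vblank interrupt path — everything named foo_* below is a hypothetical stand-in, not code from this series:

/* Hypothetical driver, for illustration only. Assumes struct foo_dc
 * embeds a struct drm_crtc and latches a pending pageflip event. */
static irqreturn_t foo_dc_irq(int irq, void *data)
{
       struct foo_dc *dc = data;
       struct drm_device *drm = dc->crtc.dev;
       unsigned long flags;

       if (foo_read(dc, FOO_INT_STATUS) & FOO_INT_VBLANK) {
               /* bump the software vblank counter, wake any waiters */
               drm_crtc_handle_vblank(&dc->crtc);

               spin_lock_irqsave(&drm->event_lock, flags);
               if (dc->event) {
                       /* event lock is held, as the helper requires */
                       drm_crtc_send_vblank_event(&dc->crtc, dc->event);
                       dc->event = NULL;
               }
               spin_unlock_irqrestore(&drm->event_lock, flags);
       }

       return IRQ_HANDLED;
}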
@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
        if (!i915.reset)
                return 0;

+       intel_reset_gt_powersave(dev);
+
        mutex_lock(&dev->struct_mutex);

        i915_gem_reset(dev);
@@ -880,7 +882,7 @@ int i915_reset(struct drm_device *dev)
                 * of re-init after reset.
                 */
                if (INTEL_INFO(dev)->gen > 5)
-                       intel_reset_gt_powersave(dev);
+                       intel_enable_gt_powersave(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
        .gem_prime_import = i915_gem_prime_import,

        .dumb_create = i915_gem_dumb_create,
-       .dumb_map_offset = i915_gem_dumb_map_offset,
+       .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
@@ -2501,9 +2501,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
-                            struct drm_device *dev, uint32_t handle,
-                            uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+                     uint32_t handle, uint64_t *offset);
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -401,7 +401,6 @@ static int
 i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
-               bool dumb,
                uint32_t *handle_p)
 {
        struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
        if (obj == NULL)
                return -ENOMEM;

-       obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
-                              args->size, true, &args->handle);
+                              args->size, &args->handle);
 }

 /**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_create *args = data;

        return i915_gem_create(file, dev,
-                              args->size, false, &args->handle);
+                              args->size, &args->handle);
 }

 static inline int
@@ -1840,10 +1838,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
        drm_gem_free_mmap_offset(&obj->base);
 }

-static int
+int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
-                 uint32_t handle, bool dumb,
+                 uint32_t handle,
                  uint64_t *offset)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1858,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
                goto unlock;
        }

-       /*
-        * We don't allow dumb mmaps on objects created using another
-        * interface.
-        */
-       WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
-                 "Illegal dumb map of accelerated buffer.\n");
-
        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
@@ -1891,15 +1882,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
        return ret;
 }

-int
-i915_gem_dumb_map_offset(struct drm_file *file,
-                        struct drm_device *dev,
-                        uint32_t handle,
-                        uint64_t *offset)
-{
-       return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
@@ -1921,7 +1903,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_mmap_gtt *args = data;

-       return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }

 static inline int
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
                   u32 hw_flags)
 {
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
-       int ret;
+       const int num_rings =
+               /* Use an extended w/a on ivb+ if signalling from other rings */
+               i915_semaphore_is_enabled(ring->dev) ?
+               hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+               0;
+       int len, i, ret;

        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
        if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

-       ret = intel_ring_begin(ring, 6);
+       len = 4;
+       if (INTEL_INFO(ring->dev)->gen >= 7)
+               len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+       ret = intel_ring_begin(ring, len);
        if (ret)
                return ret;

        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (INTEL_INFO(ring->dev)->gen >= 7)
+       if (INTEL_INFO(ring->dev)->gen >= 7) {
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
-       else
-               intel_ring_emit(ring, MI_NOOP);
+               if (num_rings) {
+                       struct intel_engine_cs *signaller;
+
+                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_ring(signaller, to_i915(ring->dev), i) {
+                               if (signaller == ring)
+                                       continue;
+
+                               intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                               intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                       }
+               }
+       }

        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
         */
        intel_ring_emit(ring, MI_NOOP);

-       if (INTEL_INFO(ring->dev)->gen >= 7)
+       if (INTEL_INFO(ring->dev)->gen >= 7) {
+               if (num_rings) {
+                       struct intel_engine_cs *signaller;
+
+                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_ring(signaller, to_i915(ring->dev), i) {
+                               if (signaller == ring)
+                                       continue;
+
+                               intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                               intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                       }
+               }
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-       else
-               intel_ring_emit(ring, MI_NOOP);
+       }

        intel_ring_advance(ring);
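
The len computation in the hunk above packs its ring-space accounting into one expression; here is how the dword budget appears to break down (my reading of the patch, not text from the commit):

/*
 * Ring-space budget for the MI_SET_CONTEXT sequence, as I read it:
 *
 *   4                base sequence (MI_NOOPs, MI_SET_CONTEXT, flags word)
 *   2                MI_ARB_ON_OFF disable + enable (gen7+)
 *   4*num_rings + 2  two MI_LOAD_REGISTER_IMM headers, plus one
 *                    (register, value) pair per other ring in each of
 *                    the PSMI disable and re-enable blocks
 *
 * Example: gen7, semaphores on, three rings -> num_rings = 2, so
 * len = 4 + 2 + (4*2 + 2) = 16 dwords.
 */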
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        goto err;
                }

-               WARN_ONCE(obj->base.dumb,
-                         "GPU use of dumb buffer is illegal.\n");
-
                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;

        spin_lock_irq(&dev_priv->irq_lock);
+
        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
+       I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+                               dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
        spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

        if (INTEL_INFO(dev)->gen >= 6) {
-               pm_irqs |= dev_priv->pm_rps_events;
-
+               /*
+                * RPS interrupts will get enabled/disabled on demand when RPS
+                * itself is enabled/disabled.
+                */
                if (HAS_VEBOX(dev))
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
        dev_priv->pm_irq_mask = 0xffffffff;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
-       GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+       /*
+        * RPS interrupts will get enabled/disabled on demand when RPS itself
+        * is enabled/disabled.
+        */
+       GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
 }
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)

        vlv_display_irq_reset(dev_priv);

-       dev_priv->irq_mask = 0;
+       dev_priv->irq_mask = ~0;
 }

 static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -395,6 +395,7 @@
 #define   PIPE_CONTROL_STORE_DATA_INDEX		(1<<21)
 #define   PIPE_CONTROL_CS_STALL			(1<<20)
 #define   PIPE_CONTROL_TLB_INVALIDATE		(1<<18)
+#define   PIPE_CONTROL_MEDIA_STATE_CLEAR	(1<<16)
 #define   PIPE_CONTROL_QW_WRITE			(1<<14)
 #define   PIPE_CONTROL_POST_SYNC_OP_MASK	(3<<14)
 #define   PIPE_CONTROL_DEPTH_STALL		(1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
 #define GEN6_VERSYNC	(RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC	(RING_SYNC_2(VEBOX_RING_BASE))
 #define GEN6_NOSYNC	0
+#define RING_PSMI_CTL(base)	((base)+0x50)
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
 #define GEN6_BLITTER_FBC_NOTIFY			(1<<3)

 #define GEN6_RC_SLEEP_PSMI_CONTROL	0x2050
+#define   GEN6_PSMI_SLEEP_MSG_DISABLE	(1 << 0)
 #define   GEN8_RC_SEMA_IDLE_MSG_DISABLE	(1 << 12)
 #define   GEN8_FF_DOP_CLOCK_GATE_DISABLE	(1<<10)
@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
                valleyview_cleanup_gt_powersave(dev);
 }

+static void gen6_suspend_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+       /*
+        * TODO: disable RPS interrupts on GEN9+ too once RPS support
+        * is added for it.
+        */
+       if (INTEL_INFO(dev)->gen < 9)
+               gen6_disable_rps_interrupts(dev);
+}
+
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
  * @dev: drm device
@@ -6206,14 +6220,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                return;

-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       /*
-        * TODO: disable RPS interrupts on GEN9+ too once RPS support
-        * is added for it.
-        */
-       if (INTEL_INFO(dev)->gen < 9)
-               gen6_disable_rps_interrupts(dev);
+       gen6_suspend_rps(dev);

        /* Force GPU to min freq during suspend */
        gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;

+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       gen6_suspend_rps(dev);
        dev_priv->rps.enabled = false;
-       intel_enable_gt_powersave(dev);
 }

 static void ibx_init_clock_gating(struct drm_device *dev)
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+               flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

+               flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
                msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
-       if (gpu->pm4)
-               release_firmware(gpu->pm4);
-       if (gpu->pfp)
-               release_firmware(gpu->pfp);
+       release_firmware(gpu->pm4);
+       release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
 }
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        uint32_t hpd_ctrl;
        int i, ret;

+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_enable(hdmi->hpd_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
+       }
+
        ret = gpio_config(hdmi, true);
        if (ret) {
                dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
                }
        }

-       for (i = 0; i < config->hpd_reg_cnt; i++) {
-               ret = regulator_enable(hdmi->hpd_regs[i]);
-               if (ret) {
-                       dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
-                                       config->hpd_reg_names[i], ret);
-                       goto fail;
-               }
-       }
-
        hdmi_set_mode(hdmi, false);
        phy->funcs->reset(phy);
        hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        return ret;
 }

-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
 {
        struct hdmi *hdmi = hdmi_connector->hdmi;
        const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
        hdmi_set_mode(hdmi, false);

-       for (i = 0; i < config->hpd_reg_cnt; i++) {
-               ret = regulator_disable(hdmi->hpd_regs[i]);
-               if (ret) {
-                       dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
-                                       config->hpd_reg_names[i], ret);
-                       goto fail;
-               }
-       }
-
        for (i = 0; i < config->hpd_clk_cnt; i++)
                clk_disable_unprepare(hdmi->hpd_clks[i]);

        ret = gpio_config(hdmi, false);
-       if (ret) {
-               dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-               goto fail;
-       }
-
-       return 0;
-
-fail:
-       return ret;
+       if (ret)
+               dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_disable(hdmi->hpd_regs[i]);
+               if (ret)
+                       dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+       }
 }
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
                        (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
                bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);

-               DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
-               /* ack the irq: */
+               /* ack & disable (temporarily) HPD events: */
                hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
-                               hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+                               HDMI_HPD_INT_CTRL_INT_ACK);
+
+               DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);

                /* detect disconnect if we are connected or visa versa: */
                hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-
        DBG("%s: check", mdp4_crtc->name);

-       if (mdp4_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
-
        // TODO anything else to check?

        return 0;
 }
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

-       DBG("%s: flush", mdp4_crtc->name);
+       DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

        WARN_ON(mdp4_crtc->event);
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
        DBG("%s: check", mdp5_crtc->name);

-       if (mdp5_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
-
        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

-       DBG("%s: flush", mdp5_crtc->name);
+       DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

        WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
-       /* when called from modeset_init(), skip the rest until later: */
-       if (!mdp5_kms)
-               return;
+       mdp_irq_update(&mdp5_kms->base);

        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                goto fail;
        }

-       /* NOTE: the vsync and error irq's are actually associated with
-        * the INTF/encoder.. the easiest way to deal with this (ie. what
-        * we do now) is assume a fixed relationship between crtc's and
-        * encoders.  I'm not sure if there is ever a need to more freely
-        * assign crtcs to encoders, but if there is then we need to take
-        * care of error and vblank irq's that the crtc has registered,
-        * and also update user-requested vblank_mask.
-        */
-       encoder->possible_crtcs = BIT(0);
-       mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+       encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
        priv->encoders[priv->num_encoders++] = encoder;

        /* Construct bridge/connector for HDMI: */
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
        mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
 }

-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
 {
        unsigned long flags;
        spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
        spin_unlock_irqrestore(&list_lock, flags);

        if (needs_update)
-               update_irq_unlocked(mdp_kms);
+               mdp_irq_update(mdp_kms);
 }

 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
        spin_unlock_irqrestore(&list_lock, flags);

        if (needs_update)
-               update_irq_unlocked(mdp_kms);
+               mdp_irq_update(mdp_kms);
 }
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
+void mdp_irq_update(struct mdp_kms *mdp_kms);

 /*
  * pixel format helpers:
@@ -23,10 +23,41 @@ struct msm_commit {
        struct drm_atomic_state *state;
        uint32_t fence;
        struct msm_fence_cb fence_cb;
+       uint32_t crtc_mask;
 };

 static void fence_cb(struct msm_fence_cb *cb);

+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+       int ret;
+
+       spin_lock(&priv->pending_crtcs_event.lock);
+       ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+                       !(priv->pending_crtcs & crtc_mask));
+       if (ret == 0) {
+               DBG("start: %08x", crtc_mask);
+               priv->pending_crtcs |= crtc_mask;
+       }
+       spin_unlock(&priv->pending_crtcs_event.lock);
+
+       return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+       spin_lock(&priv->pending_crtcs_event.lock);
+       DBG("end: %08x", crtc_mask);
+       priv->pending_crtcs &= ~crtc_mask;
+       wake_up_all_locked(&priv->pending_crtcs_event);
+       spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
 static struct msm_commit *new_commit(struct drm_atomic_state *state)
 {
        struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)

        drm_atomic_helper_commit_post_planes(dev, state);

+       /* NOTE: _wait_for_vblanks() only waits for vblank on
+        * enabled CRTCs.  So we end up faulting when disabling
+        * due to (potentially) unref'ing the outgoing fb's
+        * before the vblank when the disable has latched.
+        *
+        * But if it did wait on disabled (or newly disabled)
+        * CRTCs, that would be racy (ie. we could have missed
+        * the irq.  We need some way to poll for pipe shut
+        * down.  Or just live with occasionally hitting the
+        * timeout in the CRTC disable path (which really should
+        * not be critical path)
+        */
+
        drm_atomic_helper_wait_for_vblanks(dev, state);

        drm_atomic_helper_cleanup_planes(dev, state);

        drm_atomic_state_free(state);

+       end_atomic(dev->dev_private, c->crtc_mask);
+
        kfree(c);
 }
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
 int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool async)
 {
-       struct msm_commit *c;
        int nplanes = dev->mode_config.num_total_plane;
+       int ncrtcs = dev->mode_config.num_crtc;
+       struct msm_commit *c;
        int i, ret;

        ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
                return ret;

        c = new_commit(state);
+       if (!c)
+               return -ENOMEM;
+
+       /*
+        * Figure out what crtcs we have:
+        */
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+               if (!crtc)
+                       continue;
+               c->crtc_mask |= (1 << drm_crtc_index(crtc));
+       }

        /*
         * Figure out what fence to wait for:
@@ -121,6 +180,14 @@ int msm_atomic_commit(struct drm_device *dev,
                add_fb(c, new_state->fb);
        }

+       /*
+        * Wait for pending updates on any of the same crtc's and then
+        * mark our set of crtc's as busy:
+        */
+       ret = start_atomic(dev->dev_private, c->crtc_mask);
+       if (ret)
+               return ret;
+
        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
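
The start_atomic()/end_atomic() pair added above gates async commits per CRTC: a set bit in pending_crtcs means that CRTC has an update in flight, and the waitqueue's own spinlock doubles as the lock for the mask. For reference, here is the same locked-waitqueue idiom in isolation — a standalone sketch, not code from the series:

#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(busy_wq);
static u32 busy_mask;          /* guarded by busy_wq.lock */

static int claim(u32 mask)
{
       int ret;

       spin_lock(&busy_wq.lock);
       /* sleeps with busy_wq.lock dropped, retakes it before returning */
       ret = wait_event_interruptible_locked(busy_wq, !(busy_mask & mask));
       if (ret == 0)
               busy_mask |= mask;
       spin_unlock(&busy_wq.lock);

       return ret;
}

static void release(u32 mask)
{
       spin_lock(&busy_wq.lock);
       busy_mask &= ~mask;
       wake_up_all_locked(&busy_wq);
       spin_unlock(&busy_wq.lock);
}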
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
        priv->wq = alloc_ordered_workqueue("msm", 0);
        init_waitqueue_head(&priv->fence_event);
+       init_waitqueue_head(&priv->pending_crtcs_event);

        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->fence_cbs);
@@ -96,6 +96,10 @@ struct msm_drm_private {
        /* callbacks deferred until bo is inactive: */
        struct list_head fence_cbs;

+       /* crtcs pending async atomic updates: */
+       uint32_t pending_crtcs;
+       wait_queue_head_t pending_crtcs_event;
+
        /* registered MMUs: */
        unsigned int num_mmus;
        struct msm_mmu *mmus[NUM_DOMAINS];
@@ -190,8 +190,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 fail:

        if (ret) {
-               if (fbi)
-                       framebuffer_release(fbi);
+               framebuffer_release(fbi);
                if (fb) {
                        drm_framebuffer_unregister_private(fb);
                        drm_framebuffer_remove(fb);
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                drm_free_large(msm_obj->pages);
        } else {
-               if (msm_obj->vaddr)
-                       vunmap(msm_obj->vaddr);
+               vunmap(msm_obj->vaddr);
                put_pages(obj);
        }
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        if (ret)
                return ret;

-       bo->gem.dumb = true;
        ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->gem);
        return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = nouveau_gem_object(gem);
-
-               /*
-                * We don't allow dumb mmaps on objects created using another
-                * interface.
-                */
-               WARN_ONCE(!(gem->dumb || gem->import_attach),
-                         "Illegal dumb map of accelerated buffer.\n");
-
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
@@ -444,9 +444,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

-               WARN_ONCE(nvbo->gem.dumb,
-                         "GPU use of dumb buffer is illegal.\n");
-
                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
@@ -28,6 +28,7 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"

+#include "drm_legacy.h"
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
-               return -EINVAL;
+               return drm_legacy_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        return r;
 }

-static int radeon_mode_mmap(struct drm_file *filp,
-                           struct drm_device *dev,
-                           uint32_t handle, bool dumb,
-                           uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+                         struct drm_device *dev,
+                         uint32_t handle, uint64_t *offset_p)
 {
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
        if (gobj == NULL) {
                return -ENOENT;
        }
-
-       /*
-        * We don't allow dumb mmaps on objects created using another
-        * interface.
-        */
-       WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
-                 "Illegal dumb map of GPU buffer.\n");
-
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
        return 0;
 }

-int radeon_mode_dumb_mmap(struct drm_file *filp,
-                         struct drm_device *dev,
-                         uint32_t handle, uint64_t *offset_p)
-{
-       return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
        struct drm_radeon_gem_mmap *args = data;

-       return radeon_mode_mmap(filp, dev, args->handle, false,
-                               &args->addr_ptr);
+       return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
 }

 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
-       gobj->dumb = true;
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
@@ -28,6 +28,8 @@
 #include "cikd.h"
 #include "cik_reg.h"
 #include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
 
 #define CIK_PIPE_PER_MEC	(4)
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
 static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
 static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
 
 /*
  * Register access functions
@@ -91,6 +94,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.hqd_load = kgd_hqd_load,
 	.hqd_is_occupies = kgd_hqd_is_occupies,
 	.hqd_destroy = kgd_hqd_destroy,
+	.get_fw_version = get_fw_version
 };
 
 static const struct kgd2kfd_calls *kgd2kfd;
@@ -561,3 +565,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
 	release_queue(kgd);
 	return 0;
 }
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+	struct radeon_device *rdev = (struct radeon_device *) kgd;
+	const union radeon_firmware_header *hdr;
+
+	BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+	switch (type) {
+	case KGD_ENGINE_PFP:
+		hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+		break;
+
+	case KGD_ENGINE_ME:
+		hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+		break;
+
+	case KGD_ENGINE_CE:
+		hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC1:
+		hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+		break;
+
+	case KGD_ENGINE_MEC2:
+		hdr = (const union radeon_firmware_header *)
+						rdev->mec2_fw->data;
+		break;
+
+	case KGD_ENGINE_RLC:
+		hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+		break;
+
+	case KGD_ENGINE_SDMA:
+		hdr = (const union radeon_firmware_header *)
+						rdev->sdma_fw->data;
+		break;
+
+	default:
+		return 0;
+	}
+
+	if (hdr == NULL)
+		return 0;
+
+	/* Only 12 bits in use */
+	return hdr->common.ucode_version;
+}
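The new get_fw_version() callback lets amdkfd query engine microcode versions through the kfd2kgd table without reaching into radeon internals. A hedged sketch of what a consumer might look like; the kfd_dev field names (kfd2kgd, kgd) are assumptions for illustration:

/* Hedged sketch of an amdkfd-side caller. */
static uint16_t kfd_mec_fw_version(struct kfd_dev *kfd)
{
	/* get_fw_version() returns 0 for unknown engines or missing headers. */
	return kfd->kfd2kgd->get_fw_version(kfd->kgd, KGD_ENGINE_MEC1);
}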
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			u32 current_domain =
 				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
-			WARN_ONCE(bo->gem_base.dumb,
-				  "GPU use of dumb buffer is illegal.\n");
-
 			/* Check if this buffer will be moved and don't move it
 			 * if we have moved too many buffers for this IB already.
 			 *
...
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 			   const struct tegra_dc_window *window)
 {
 	unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
-	unsigned long value;
+	unsigned long value, flags;
 	bool yuv, planar;
 
 	/*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 	else
 		bpp = planar ? 1 : 2;
 
+	spin_lock_irqsave(&dc->lock, flags);
+
 	value = WINDOW_A_SELECT << index;
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 	case TEGRA_BO_TILING_MODE_BLOCK:
 		DRM_ERROR("hardware doesn't support block linear mode\n");
+		spin_unlock_irqrestore(&dc->lock, flags);
 		return -EINVAL;
 	}
 
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 
 	tegra_dc_window_commit(dc, index);
 
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	return 0;
 }
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
 {
 	struct tegra_dc *dc = to_tegra_dc(plane->crtc);
 	struct tegra_plane *p = to_tegra_plane(plane);
+	unsigned long flags;
 	u32 value;
 
 	if (!plane->crtc)
 		return 0;
 
+	spin_lock_irqsave(&dc->lock, flags);
+
 	value = WINDOW_A_SELECT << p->index;
 	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
 
 	tegra_dc_window_commit(dc, p->index);
 
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	return 0;
 }
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
 	unsigned int h_offset = 0, v_offset = 0;
 	struct tegra_bo_tiling tiling;
+	unsigned long value, flags;
 	unsigned int format, swap;
-	unsigned long value;
 	int err;
 
 	err = tegra_fb_get_tiling(fb, &tiling);
 	if (err < 0)
 		return err;
 
+	spin_lock_irqsave(&dc->lock, flags);
+
 	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
 
 	value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	case TEGRA_BO_TILING_MODE_BLOCK:
 		DRM_ERROR("hardware doesn't support block linear mode\n");
+		spin_unlock_irqrestore(&dc->lock, flags);
 		return -EINVAL;
 	}
 
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 	tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	return 0;
 }
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
 	unsigned long flags, base;
 	struct tegra_bo *bo;
 
-	if (!dc->event)
+	spin_lock_irqsave(&drm->event_lock, flags);
+
+	if (!dc->event) {
+		spin_unlock_irqrestore(&drm->event_lock, flags);
 		return;
+	}
 
 	bo = tegra_fb_get_plane(crtc->primary->fb, 0);
 
+	spin_lock_irqsave(&dc->lock, flags);
+
 	/* check if new start address has been latched */
+	tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
 	tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
 	base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
 	tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
 
+	spin_unlock_irqrestore(&dc->lock, flags);
+
 	if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
-		spin_lock_irqsave(&drm->event_lock, flags);
-		drm_send_vblank_event(drm, dc->pipe, dc->event);
-		drm_vblank_put(drm, dc->pipe);
+		drm_crtc_send_vblank_event(crtc, dc->event);
+		drm_crtc_vblank_put(crtc);
 		dc->event = NULL;
-		spin_unlock_irqrestore(&drm->event_lock, flags);
 	}
+
+	spin_unlock_irqrestore(&drm->event_lock, flags);
 }
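An editorial note on the hunk above, not part of the commit: flags is reused for the nested spin_lock_irqsave(&dc->lock, flags), which overwrites the interrupt state saved when taking event_lock. Here that is harmless, because the only caller is tegra_dc_irq() (see the last hunk in this file), so interrupts are disabled in both snapshots; a process-context caller would return with interrupts left off. A minimal illustration of the pitfall:

/* Sketch: why reusing one flags word across nested irqsave locks is
 * only safe when interrupts are already disabled at the outer lock. */
spin_lock_irqsave(&outer, flags);	/* saves the caller's IRQ state */
spin_lock_irqsave(&inner, flags);	/* overwrites it with "IRQs off" */
spin_unlock_irqrestore(&inner, flags);	/* restores "IRQs off": fine */
spin_unlock_irqrestore(&outer, flags);	/* restores "IRQs off" again:
					 * the caller's state is lost */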
 
 void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
 	if (dc->event && dc->event->base.file_priv == file) {
 		dc->event->base.destroy(&dc->event->base);
-		drm_vblank_put(drm, dc->pipe);
+		drm_crtc_vblank_put(crtc);
 		dc->event = NULL;
 	}
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
 static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			      struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
 {
+	unsigned int pipe = drm_crtc_index(crtc);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
-	struct drm_device *drm = crtc->dev;
 
 	if (dc->event)
 		return -EBUSY;
 
 	if (event) {
-		event->pipe = dc->pipe;
+		event->pipe = pipe;
 		dc->event = event;
-		drm_vblank_get(drm, dc->pipe);
+		drm_crtc_vblank_get(crtc);
 	}
 
 	tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
 		/*
 		dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
 		*/
-		drm_handle_vblank(dc->base.dev, dc->pipe);
+		drm_crtc_handle_vblank(&dc->base);
 		tegra_dc_finish_page_flip(dc);
 	}
...
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
 	.llseek = noop_llseek,
 };
 
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+					     unsigned int pipe)
 {
 	struct drm_crtc *crtc;
 
 	list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-		struct tegra_dc *dc = to_tegra_dc(crtc);
-
-		if (dc->pipe == pipe)
+		if (pipe == drm_crtc_index(crtc))
 			return crtc;
 	}
 
 	return NULL;
 }
 
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
 {
+	struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+	if (!crtc)
+		return 0;
+
 	/* TODO: implement real hardware counter using syncpoints */
-	return drm_vblank_count(dev, crtc);
+	return drm_crtc_vblank_count(crtc);
 }
 
 static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
...
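For reference, these callbacks are the tegra drm_driver's vblank hooks; the NULL check above guards the counter against a pipe index with no registered CRTC. A hedged sketch of the hookup (not copied from the file; field names are the standard drm_driver ones, and tegra_drm_disable_vblank is assumed to exist alongside the enable hook shown above):

static struct drm_driver tegra_drm_driver = {
	/* ... */
	.get_vblank_counter = tegra_drm_get_vblank_counter,
	.enable_vblank = tegra_drm_enable_vblank,
	.disable_vblank = tegra_drm_disable_vblank,
	/* ... */
};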
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
 	}
 }
 
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
-			      size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 {
+	struct scatterlist *s;
+	struct sg_table *sgt;
+	unsigned int i;
+
 	bo->pages = drm_gem_get_pages(&bo->gem);
 	if (IS_ERR(bo->pages))
 		return PTR_ERR(bo->pages);
 
-	bo->num_pages = size >> PAGE_SHIFT;
+	bo->num_pages = bo->gem.size >> PAGE_SHIFT;
 
-	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-	if (IS_ERR(bo->sgt)) {
-		drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-		return PTR_ERR(bo->sgt);
+	sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	if (IS_ERR(sgt))
+		goto put_pages;
+
+	/*
+	 * Fake up the SG table so that dma_map_sg() can be used to flush the
+	 * pages associated with it. Note that this relies on the fact that
+	 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+	 * only cache maintenance.
+	 *
+	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
+	 * without relying on symbols that are not exported.
+	 */
+	for_each_sg(sgt->sgl, s, sgt->nents, i)
+		sg_dma_address(s) = sg_phys(s);
+
+	if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+		sgt = ERR_PTR(-ENOMEM);
+		goto release_sgt;
 	}
 
+	bo->sgt = sgt;
+
 	return 0;
+
+release_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+
+put_pages:
+	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+
+	return PTR_ERR(sgt);
 }
 
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
-			  size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
 {
 	struct tegra_drm *tegra = drm->dev_private;
 	int err;
 
 	if (tegra->domain) {
-		err = tegra_bo_get_pages(drm, bo, size);
+		err = tegra_bo_get_pages(drm, bo);
 		if (err < 0)
 			return err;
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
 			return err;
 		}
 	} else {
+		size_t size = bo->gem.size;
+
 		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
 						   GFP_KERNEL | __GFP_NOWARN);
 		if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
 	if (IS_ERR(bo))
 		return bo;
 
-	err = tegra_bo_alloc(drm, bo, size);
+	err = tegra_bo_alloc(drm, bo);
 	if (err < 0)
 		goto release;
...
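The size parameters can be dropped here because drm_gem_object_init() records the object size in bo->gem.size at creation time, giving the IOMMU and CMA allocation paths one authoritative value instead of a threaded argument. Roughly, on the creation side (a hedged sketch; the round_up() alignment is an assumption about how the driver sizes objects, not a quote from the file):

/* Hedged sketch of the creation side implied by these hunks. */
size = round_up(size, PAGE_SIZE);

err = drm_gem_object_init(drm, &bo->gem, size);	/* sets bo->gem.size */
if (err < 0)
	return ERR_PTR(err);

err = tegra_bo_alloc(drm, bo);	/* reads bo->gem.size internally */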
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
 			   struct drm_file *filp);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
 extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
 				  struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+				       struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
...
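These drm_crtc_*() additions mirror the existing device-plus-pipe API one-to-one, which is what lets the tegra hunks above swap dc->pipe bookkeeping for the CRTC itself. They are presumably thin wrappers that derive the pipe index from the CRTC; a sketch of the likely shape (the real bodies live in drm_irq.c and are not reproduced here):

/* Sketch of the presumed wrapper shape; treat as illustrative. */
u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
{
	return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
}

void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
				struct drm_pending_vblank_event *e)
{
	drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
}

bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
{
	return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
}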
@@ -119,13 +119,6 @@ struct drm_gem_object {
 	 * simply leave it as NULL.
 	 */
 	struct dma_buf_attachment *import_attach;
-
-	/**
-	 * dumb - created as dumb buffer
-	 * Whether the gem object was created using the dumb buffer interface
-	 * as such it may not be used for GPU rendering.
-	 */
-	bool dumb;
 };
 
 void drm_gem_object_release(struct drm_gem_object *obj);
...