Commit c11b8989 authored by Dave Airlie


Merge tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux into drm-next

omapdrm changes for v4.5

* enable DRIVER_ATOMIC
* improved TILER performance
* cleanups preparing for DMAbuf import
* fbdev emulation is now optional
* minor fixes

* tag 'omapdrm-4.5-resolved' of git://git.kernel.org/pub/scm/linux/kernel/git/tomba/linux:
  drm/omap: remove obsolete manager assignment
  drm/omap: set DRIVER_ATOMIC for omapdrm
  drm/omap: remove unused plugin defines
  drm/omap: Use bitmaps for TILER placement
  drm: omapdrm: gem: Remove check for impossible condition
  drm: omapdrm: gem: Simplify error handling when creating GEM object
  drm: omapdrm: gem: Don't free mmap offset twice
  drm: omapdrm: gem: Fix GEM object destroy in error path
  drm: omapdrm: gem: Free the correct memory object
  drm: omapdrm: gem: Mask out private flags passed from userspace
  drm: omapdrm: gem: Move global usergart variable to omap_drm_private
  drm: omapdrm: gem: Group functions by purpose
  drm: omapdrm: gem: Remove forward declarations
  drm: omapdrm: gem: Remove unused function prototypes
  drm: omapdrm: Make fbdev emulation optional
  drm: omapdrm: Fix plane state free in plane reset handler
  drm: omapdrm: move omap_plane_reset()
  drm/omap: Use platform_register/unregister_drivers()
  drm: omapdrm: tiler: Remove unneded module alias for tiler
@@ -12,10 +12,11 @@ omapdrm-y := omap_drv.o \
omap_encoder.o \
omap_connector.o \
omap_fb.o \
-omap_fbdev.o \
omap_gem.o \
omap_gem_dmabuf.o \
omap_dmm_tiler.o \
tcm-sita.o

+omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o
+
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
@@ -51,6 +51,7 @@ static int mm_show(struct seq_file *m, void *arg)
return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}

+#ifdef CONFIG_DRM_FBDEV_EMULATION
static int fb_show(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -73,12 +74,15 @@ static int fb_show(struct seq_file *m, void *arg)
return 0;
}
+#endif

/* list of debugfs files that are applicable to all devices */
static struct drm_info_list omap_debugfs_list[] = {
{"gem", gem_show, 0},
{"mm", mm_show, 0},
+#ifdef CONFIG_DRM_FBDEV_EMULATION
{"fb", fb_show, 0},
+#endif
};
/* list of debugfs files that are specific to devices with dmm/tiler */
......
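The #ifdef-wrapped table entry works because the list above is only walked when it is handed to the DRM debugfs core at registration time. A minimal sketch of that registration, assuming the drm_debugfs_create_files() helper of this kernel era (omapdrm's real init path also registers a second, DMM-specific list; example_debugfs_init is a hypothetical name):

	static int example_debugfs_init(struct drm_minor *minor)
	{
		/* hand the whole table to the DRM core; entries compiled out
		 * by the #ifdef above simply never exist in the array */
		return drm_debugfs_create_files(omap_debugfs_list,
						ARRAY_SIZE(omap_debugfs_list),
						minor->debugfs_root, minor);
	}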
@@ -363,6 +363,7 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
u32 min_align = 128;
int ret;
unsigned long flags;
+size_t slot_bytes;

BUG_ON(!validfmt(fmt));
@@ -371,13 +372,15 @@ struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
h = DIV_ROUND_UP(h, geom[fmt].slot_h);

/* convert alignment to slots */
-min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
-align = ALIGN(align, min_align);
-align /= geom[fmt].slot_w * geom[fmt].cpp;
+slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
+min_align = max(min_align, slot_bytes);
+align = (align > min_align) ? ALIGN(align, min_align) : min_align;
+align /= slot_bytes;

block->fmt = fmt;

-ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
+&block->area);
if (ret) {
kfree(block);
return ERR_PTR(-ENOMEM);
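To see what the new conversion does, here is the arithmetic traced for an assumed slot geometry of 64-byte slots (slot_w = 64, cpp = 1; the real geom[] values are format-specific and live in the driver). Note that the new ternary also keeps a zero or sub-minimum alignment request from collapsing to zero, which the old unconditional ALIGN() would have allowed:

	size_t slot_bytes = 64 * 1;   /* geom[fmt].slot_w * geom[fmt].cpp (assumed) */
	u32 min_align = max(128, (int)slot_bytes);   /* = 128, the hardware floor */
	u32 align = 4096;             /* caller asked for PAGE_SIZE alignment */

	align = (align > min_align) ? ALIGN(align, min_align) : min_align; /* = 4096 */
	align /= slot_bytes;          /* = 64: alignment expressed in slots for tcm */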
@@ -739,8 +742,7 @@ static int omap_dmm_probe(struct platform_device *dev)
programming during refill operations */
for (i = 0; i < omap_dmm->num_lut; i++) {
omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
-omap_dmm->container_height,
-NULL);
+omap_dmm->container_height);

if (!omap_dmm->tcm[i]) {
dev_err(&dev->dev, "failed to allocate container\n");
@@ -1030,4 +1032,3 @@ struct platform_driver omap_dmm_driver = {
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");
-MODULE_ALIAS("platform:" DMM_DRIVER_NAME);
@@ -547,14 +547,19 @@ static int ioctl_set_param(struct drm_device *dev, void *data,
return 0;
}

+#define OMAP_BO_USER_MASK 0x00ffffff /* flags settable by userspace */
+
static int ioctl_gem_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_omap_gem_new *args = data;
+u32 flags = args->flags & OMAP_BO_USER_MASK;
+
VERB("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
-args->size.bytes, args->flags);
-return omap_gem_new_handle(dev, file_priv, args->size,
-args->flags, &args->handle);
+args->size.bytes, flags);
+
+return omap_gem_new_handle(dev, file_priv, args->size, flags,
+&args->handle);
}
static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
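The OMAP_BO_USER_MASK guard matters because omapdrm keeps driver-internal state in the upper 8 bits of the same flags word (OMAP_BO_DMA and friends, visible in the omap_gem.c excerpt further down). Masking on entry keeps userspace from smuggling those bits in. A sketch with illustrative values:

	uint32_t requested = 0x01000002;  /* hypothetical: OMAP_BO_DMA | a user flag */
	uint32_t flags = requested & OMAP_BO_USER_MASK;  /* = 0x00000002 */
	/* the internal OMAP_BO_DMA bit (0x01000000) is dropped before the
	 * value ever reaches omap_gem_new_handle() */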
@@ -692,10 +697,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
drm_crtc_vblank_off(priv->crtcs[i]);

priv->fbdev = omap_fbdev_init(dev);
-if (!priv->fbdev) {
-dev_warn(dev->dev, "omap_fbdev_init failed\n");
-/* well, limp along without an fbdev.. maybe X11 will work? */
-}

/* store off drm_device for use in pm ops */
dev_set_drvdata(dev->dev, dev);
@@ -831,7 +832,8 @@ static const struct file_operations omapdriver_fops = {
};

static struct drm_driver omap_drm_driver = {
-.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
+DRIVER_ATOMIC,
.load = dev_load,
.unload = dev_unload,
.open = dev_open,
@@ -928,35 +930,23 @@ static struct platform_driver pdev = {
.remove = pdev_remove,
};

+static struct platform_driver * const drivers[] = {
+&omap_dmm_driver,
+&pdev,
+};
+
static int __init omap_drm_init(void)
{
-int r;
-
DBG("init");
-r = platform_driver_register(&omap_dmm_driver);
-if (r) {
-pr_err("DMM driver registration failed\n");
-return r;
-}
-
-r = platform_driver_register(&pdev);
-if (r) {
-pr_err("omapdrm driver registration failed\n");
-platform_driver_unregister(&omap_dmm_driver);
-return r;
-}
-
-return 0;
+
+return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit omap_drm_fini(void)
{
DBG("fini");
-platform_driver_unregister(&pdev);
-platform_driver_unregister(&omap_dmm_driver);
+
+platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
/* need late_initcall() so we load after dss_driver's are loaded */
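platform_register_drivers() is the core helper this conversion leans on: it registers the array in order and, if any registration fails, unregisters the drivers already registered before returning the error, which is exactly the unwind the removed hand-rolled code performed. A condensed sketch of that contract (not the kernel's actual implementation):

	/* sketch: what platform_register_drivers(drivers, n) guarantees */
	int example_register(struct platform_driver * const *drv, unsigned int n)
	{
		unsigned int i;
		int err;

		for (i = 0; i < n; i++) {
			err = platform_driver_register(drv[i]);
			if (err) {
				while (i--)	/* unwind in reverse order */
					platform_driver_unregister(drv[i]);
				return err;
			}
		}
		return 0;
	}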
......
@@ -36,11 +36,7 @@

#define MODULE_NAME "omapdrm"

-/* max # of mapper-id's that can be assigned.. todo, come up with a better
- * (but still inexpensive) way to store/access per-buffer mapper private
- * data..
- */
-#define MAX_MAPPERS 2
+struct omap_drm_usergart;

/* parameters which describe (unrotated) coordinates of scanout within a fb: */
struct omap_drm_window {
@@ -97,6 +93,7 @@ struct omap_drm_private {
/* list of GEM objects: */
struct list_head obj_list;

+struct omap_drm_usergart *usergart;
bool has_dmm;

/* properties: */
@@ -138,8 +135,18 @@ void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
void omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);

+#ifdef CONFIG_DRM_FBDEV_EMULATION
struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
void omap_fbdev_free(struct drm_device *dev);
+#else
+static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+return NULL;
+}
+static inline void omap_fbdev_free(struct drm_device *dev)
+{
+}
+#endif

struct omap_video_timings *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
......
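The #else stubs are what let the dev_load() hunk above drop its error handling entirely: with CONFIG_DRM_FBDEV_EMULATION disabled the calls still compile, they just do nothing. A minimal caller-side sketch, assuming the declarations above (example_load is a hypothetical name):

	static int example_load(struct drm_device *dev)
	{
		struct omap_drm_private *priv = dev->dev_private;

		/* NULL when emulation is compiled out or when init fails;
		 * either way the driver limps along without an fbdev */
		priv->fbdev = omap_fbdev_init(dev);
		return 0;
	}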
@@ -110,8 +110,6 @@ static int omap_encoder_update(struct drm_encoder *encoder,
struct omap_dss_driver *dssdrv = dssdev->driver;
int ret;

-dssdev->src->manager = omap_dss_get_overlay_manager(channel);
-
if (dssdrv->check_timings) {
ret = dssdrv->check_timings(dssdev, timings);
} else {
......
@@ -295,6 +295,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_fini(helper);
fail:
kfree(fbdev);
+
+dev_warn(dev->dev, "omap_fbdev_init failed\n");
+/* well, limp along without an fbdev.. maybe X11 will work? */
+
return NULL;
}
......
@@ -25,24 +25,15 @@
#include "omap_drv.h"
#include "omap_dmm_tiler.h"

-/* remove these once drm core helpers are merged */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
-bool dirty, bool accessed);
-int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
-
/*
 * GEM buffer object implementation.
 */

-#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
-
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */

struct omap_gem_object {
struct drm_gem_object base;
@@ -119,8 +110,7 @@ struct omap_gem_object {
} *sync;
};

-static int get_pages(struct drm_gem_object *obj, struct page ***pages);
-static uint64_t mmap_offset(struct drm_gem_object *obj);
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
* not necessarily pinned in TILER all the time, and (b) when they are
@@ -134,27 +124,69 @@ static uint64_t mmap_offset(struct drm_gem_object *obj);
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
-struct usergart_entry {
+
+struct omap_drm_usergart_entry {
struct tiler_block *block; /* the reserved tiler block */
dma_addr_t paddr;
struct drm_gem_object *obj; /* the current pinned obj */
pgoff_t obj_pgoff; /* page offset of obj currently
mapped in */
};
-static struct {
-struct usergart_entry entry[NUM_USERGART_ENTRIES];
+
+struct omap_drm_usergart {
+struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
int height; /* height in rows */
int height_shift; /* ilog2(height in rows) */
int slot_shift; /* ilog2(width per slot) */
int stride_pfn; /* stride in pages */
int last; /* index of last used entry */
-} *usergart;
+};
+
+/* -----------------------------------------------------------------------------
+ * Helpers
+ */
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+struct drm_device *dev = obj->dev;
+int ret;
+size_t size;
+
+WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+/* Make it mmapable */
+size = omap_gem_mmap_size(obj);
+ret = drm_gem_create_mmap_offset_size(obj, size);
+if (ret) {
+dev_err(dev->dev, "could not allocate mmap offset\n");
+return 0;
+}
+
+return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+return obj->filp != NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Eviction
+ */
static void evict_entry(struct drm_gem_object *obj,
-enum tiler_fmt fmt, struct usergart_entry *entry)
+enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-int n = usergart[fmt].height;
+struct omap_drm_private *priv = obj->dev->dev_private;
+int n = priv->usergart[fmt].height;
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
@@ -180,46 +212,25 @@ static void evict_entry(struct drm_gem_object *obj,
static void evict(struct drm_gem_object *obj)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
+struct omap_drm_private *priv = obj->dev->dev_private;

if (omap_obj->flags & OMAP_BO_TILED) {
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
int i;

-if (!usergart)
-return;
-
for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
-struct usergart_entry *entry = &usergart[fmt].entry[i];
+struct omap_drm_usergart_entry *entry =
+&priv->usergart[fmt].entry[i];

if (entry->obj == obj)
evict_entry(obj, fmt, entry);
}
}
}
-/* GEM objects can either be allocated from contiguous memory (in which
- * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
- * contiguous buffers can be remapped in TILER/DMM if they need to be
- * contiguous... but we don't do this all the time to reduce pressure
- * on TILER/DMM space when we know at allocation time that the buffer
- * will need to be scanned out.
- */
-static inline bool is_shmem(struct drm_gem_object *obj)
-{
-return obj->filp != NULL;
-}
-
-/**
- * shmem buffers that are mapped cached can simulate coherency via using
- * page faulting to keep track of dirty pages
- */
+/* -----------------------------------------------------------------------------
+ * Page Management
+ */
-static inline bool is_cached_coherent(struct drm_gem_object *obj)
-{
-struct omap_gem_object *omap_obj = to_omap_bo(obj);
-return is_shmem(obj) &&
-((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
-}
-
-static DEFINE_SPINLOCK(sync_lock);

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
@@ -272,6 +283,28 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
return ret;
}

+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+struct omap_gem_object *omap_obj = to_omap_bo(obj);
+int ret = 0;
+
+if (is_shmem(obj) && !omap_obj->pages) {
+ret = omap_gem_attach_pages(obj);
+if (ret) {
+dev_err(obj->dev->dev, "could not attach pages\n");
+return ret;
+}
+}
+
+/* TODO: even phys-contig.. we should have a list of pages? */
+*pages = omap_obj->pages;
+
+return 0;
+}
+
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
@@ -301,26 +334,6 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
return to_omap_bo(obj)->flags;
}

-/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
-{
-struct drm_device *dev = obj->dev;
-int ret;
-size_t size;
-
-WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-/* Make it mmapable */
-size = omap_gem_mmap_size(obj);
-ret = drm_gem_create_mmap_offset_size(obj, size);
-if (ret) {
-dev_err(dev->dev, "could not allocate mmap offset\n");
-return 0;
-}
-
-return drm_vma_node_offset_addr(&obj->vma_node);
-}
-
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
@@ -362,6 +375,10 @@ int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
return -EINVAL;
}

+/* -----------------------------------------------------------------------------
+ * Fault Handling
+ */
+
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -393,7 +410,8 @@ static int fault_2d(struct drm_gem_object *obj,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
-struct usergart_entry *entry;
+struct omap_drm_private *priv = obj->dev->dev_private;
+struct omap_drm_usergart_entry *entry;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct page *pages[64]; /* XXX is this too much to have on stack? */
unsigned long pfn;
@@ -406,8 +424,8 @@ static int fault_2d(struct drm_gem_object *obj,
 * that need to be mapped in to fill 4kb wide CPU page.  If the slot
 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
 */
-const int n = usergart[fmt].height;
-const int n_shift = usergart[fmt].height_shift;
+const int n = priv->usergart[fmt].height;
+const int n_shift = priv->usergart[fmt].height_shift;

/*
 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
@@ -428,11 +446,11 @@ static int fault_2d(struct drm_gem_object *obj,
base_pgoff = round_down(pgoff, m << n_shift);

/* figure out buffer width in slots */
-slots = omap_obj->width >> usergart[fmt].slot_shift;
+slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

-entry = &usergart[fmt].entry[usergart[fmt].last];
+entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

/* evict previous buffer using this usergart entry, if any: */
if (entry->obj)
@@ -479,12 +497,13 @@ static int fault_2d(struct drm_gem_object *obj,

for (i = n; i > 0; i--) {
vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
-pfn += usergart[fmt].stride_pfn;
+pfn += priv->usergart[fmt].stride_pfn;
vaddr += PAGE_SIZE * m;
}

/* simple round-robin: */
-usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
+% NUM_USERGART_ENTRIES;

return 0;
}
@@ -596,6 +615,9 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
return 0;
}

+/* -----------------------------------------------------------------------------
+ * Dumb Buffers
+ */

/**
 * omap_gem_dumb_create - create a dumb buffer
@@ -653,6 +675,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
return ret;
}

+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
@@ -689,6 +712,22 @@ int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
return ret;
}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Memory Management & DMA Sync
+ */
+
+/**
+ * shmem buffers that are mapped cached can simulate coherency via using
+ * page faulting to keep track of dirty pages
+ */
+static inline bool is_cached_coherent(struct drm_gem_object *obj)
+{
+struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+return is_shmem(obj) &&
+((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
+}

/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
@@ -865,28 +904,6 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
return ret;
}

-/* acquire pages when needed (for example, for DMA where physically
- * contiguous buffer is not required
- */
-static int get_pages(struct drm_gem_object *obj, struct page ***pages)
-{
-struct omap_gem_object *omap_obj = to_omap_bo(obj);
-int ret = 0;
-
-if (is_shmem(obj) && !omap_obj->pages) {
-ret = omap_gem_attach_pages(obj);
-if (ret) {
-dev_err(obj->dev->dev, "could not attach pages\n");
-return ret;
-}
-}
-
-/* TODO: even phys-contig.. we should have a list of pages? */
-*pages = omap_obj->pages;
-
-return 0;
-}
-
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
@@ -924,6 +941,7 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
return 0;
}

+#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
@@ -942,6 +960,11 @@ void *omap_gem_vaddr(struct drm_gem_object *obj)
}
return omap_obj->vaddr;
}
+#endif
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
@@ -971,6 +994,10 @@ int omap_gem_resume(struct device *dev)
}
#endif

+/* -----------------------------------------------------------------------------
+ * DebugFS
+ */
+
#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
@@ -1017,9 +1044,12 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif

-/* Buffer Synchronization:
+/* -----------------------------------------------------------------------------
+ * Buffer Synchronization
 */
+
+static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
struct list_head list;
struct omap_gem_object *omap_obj;
@@ -1265,6 +1295,10 @@ int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
return ret;
}

+/* -----------------------------------------------------------------------------
+ * Constructor & Destructor
+ */
+
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
@@ -1282,8 +1316,6 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
spin_unlock(&priv->list_lock);

-drm_gem_free_mmap_offset(obj);
-
/* this means the object is still pinned.. which really should
 * not happen.  I think..
 */
@@ -1308,31 +1340,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)

drm_gem_object_release(obj);

-kfree(obj);
-}
-
-/* convenience method to construct a GEM buffer object, and userspace handle */
-int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
-union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
-{
-struct drm_gem_object *obj;
-int ret;
-
-obj = omap_gem_new(dev, gsize, flags);
-if (!obj)
-return -ENOMEM;
-
-ret = drm_gem_handle_create(file, obj, handle);
-if (ret) {
-drm_gem_object_release(obj);
-kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
-return ret;
-}
-
-/* drop reference from allocate - handle holds it now */
-drm_gem_object_unreference_unlocked(obj);
-
-return 0;
+kfree(omap_obj);
}
/* GEM buffer object constructor */
@@ -1341,15 +1349,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
-struct drm_gem_object *obj = NULL;
+struct drm_gem_object *obj;
struct address_space *mapping;
size_t size;
int ret;

if (flags & OMAP_BO_TILED) {
-if (!usergart) {
+if (!priv->usergart) {
dev_err(dev->dev, "Tiled buffers require DMM\n");
-goto fail;
+return NULL;
}
/* tiled buffers are always shmem paged backed.. when they are
@@ -1420,16 +1428,42 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,

return obj;

fail:
-if (obj)
omap_gem_free_object(obj);
+return NULL;
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+struct drm_gem_object *obj;
+int ret;
+
+obj = omap_gem_new(dev, gsize, flags);
+if (!obj)
+return -ENOMEM;
+
+ret = drm_gem_handle_create(file, obj, handle);
+if (ret) {
+omap_gem_free_object(obj);
+return ret;
+}
-return NULL;
+
+/* drop reference from allocate - handle holds it now */
+drm_gem_object_unreference_unlocked(obj);
+
+return 0;
}
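The reworked omap_gem_new_handle() also makes the GEM reference flow easier to follow; annotated below with the reference count each step leaves behind (assuming no other users of the object):

	obj = omap_gem_new(dev, gsize, flags);          /* refcount 1: allocation */
	ret = drm_gem_handle_create(file, obj, handle); /* refcount 2: + handle  */
	drm_gem_object_unreference_unlocked(obj);       /* refcount 1: handle owns it */
	/* on handle_create failure, omap_gem_free_object() now runs the full
	 * destructor instead of the old release+kfree shortcut */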
-/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+/* -----------------------------------------------------------------------------
+ * Init & Cleanup
+ */
+
+/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
+struct omap_drm_usergart *usergart;
const enum tiler_fmt fmts[] = {
TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
};
@@ -1458,10 +1492,11 @@ void omap_gem_init(struct drm_device *dev)
usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
-struct usergart_entry *entry = &usergart[i].entry[j];
-struct tiler_block *block =
-tiler_reserve_2d(fmts[i], w, h,
-PAGE_SIZE);
+struct omap_drm_usergart_entry *entry;
+struct tiler_block *block;
+
+entry = &usergart[i].entry[j];
+block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
if (IS_ERR(block)) {
dev_err(dev->dev,
"reserve failed: %d, %d, %ld\n",
@@ -1477,13 +1512,16 @@ void omap_gem_init(struct drm_device *dev)
}
}

+priv->usergart = usergart;
priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
+struct omap_drm_private *priv = dev->dev_private;
+
/* I believe we can rely on there being no more outstanding GEM
 * objects which could depend on usergart/dmm at this point.
 */
-kfree(usergart);
+kfree(priv->usergart);
}
@@ -188,33 +188,6 @@ static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
.atomic_disable = omap_plane_atomic_disable,
};

-static void omap_plane_reset(struct drm_plane *plane)
-{
-struct omap_plane *omap_plane = to_omap_plane(plane);
-struct omap_plane_state *omap_state;
-
-if (plane->state && plane->state->fb)
-drm_framebuffer_unreference(plane->state->fb);
-
-kfree(plane->state);
-plane->state = NULL;
-
-omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
-if (omap_state == NULL)
-return;
-
-/*
- * Set defaults depending on whether we are a primary or overlay
- * plane.
- */
-omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
-? 0 : omap_plane->id;
-omap_state->base.rotation = BIT(DRM_ROTATE_0);
-
-plane->state = &omap_state->base;
-plane->state->plane = plane;
-}
-
static void omap_plane_destroy(struct drm_plane *plane)
{
struct omap_plane *omap_plane = to_omap_plane(plane);
@@ -270,6 +243,32 @@ static void omap_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(to_omap_plane_state(state));
}

+static void omap_plane_reset(struct drm_plane *plane)
+{
+struct omap_plane *omap_plane = to_omap_plane(plane);
+struct omap_plane_state *omap_state;
+
+if (plane->state) {
+omap_plane_atomic_destroy_state(plane, plane->state);
+plane->state = NULL;
+}
+
+omap_state = kzalloc(sizeof(*omap_state), GFP_KERNEL);
+if (omap_state == NULL)
+return;
+
+/*
+ * Set defaults depending on whether we are a primary or overlay
+ * plane.
+ */
+omap_state->zorder = plane->type == DRM_PLANE_TYPE_PRIMARY
+? 0 : omap_plane->id;
+omap_state->base.rotation = BIT(DRM_ROTATE_0);
+
+plane->state = &omap_state->base;
+plane->state->plane = plane;
+}
+
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
......
This diff is collapsed.
@@ -61,18 +61,17 @@ struct tcm {
unsigned int y_offset; /* offset to use for y coordinates */

-/* 'pvt' structure shall contain any tcm details (attr) along with
-linked list of allocated areas and mutex for mutually exclusive access
-to the list.  It may also contain copies of width and height to notice
-any changes to the publicly available width and height fields. */
-void *pvt;
+spinlock_t lock;
+unsigned long *bitmap;
+size_t map_size;

/* function table */
-s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
+int16_t offset, uint16_t slot_bytes,
struct tcm_area *area);
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
-s32 (*free)      (struct tcm *tcm, struct tcm_area *area);
-void (*deinit)       (struct tcm *tcm);
+s32 (*free)(struct tcm *tcm, struct tcm_area *area);
+void (*deinit)(struct tcm *tcm);
};
/*=============================================================================
@@ -91,7 +90,7 @@ struct tcm {
 *
 */
-struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+struct tcm *sita_init(u16 width, u16 height);
/**
@@ -120,6 +119,9 @@ static inline void tcm_deinit(struct tcm *tcm)
 * all values may be supported by the container manager,
 * but it must support 0 (1), 32 and 64.
 * 0 value is equivalent to 1.
+ * @param offset Offset requirement, in bytes.  This is the offset
+ * from a 4KiB aligned virtual address.
+ * @param slot_bytes Width of slot in bytes
 * @param area Pointer to where the reserved area should be stored.
 *
 * @return 0 on success.  Non-0 error code on failure.  Also,
@@ -129,7 +131,8 @@ static inline void tcm_deinit(struct tcm *tcm)
 * allocation.
 */
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
-u16 align, struct tcm_area *area)
+u16 align, int16_t offset, uint16_t slot_bytes,
+struct tcm_area *area)
{
/* perform rudimentary error checking */
s32 res = tcm == NULL ? -ENODEV :
@@ -140,7 +143,8 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,

if (!res) {
area->is2d = true;
-res = tcm->reserve_2d(tcm, height, width, align, area);
+res = tcm->reserve_2d(tcm, height, width, align, offset,
+slot_bytes, area);
area->tcm = res ? NULL : tcm;
}
......
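Putting the new parameters together, a caller that wants a w-by-h slot area now passes the offset and slot width explicitly. This mirrors the tiler_reserve_2d() call in the omap_dmm_tiler.c hunk above; the values here are illustrative, and offset -1 appears, from the driver's own usage, to mean no specific offset requirement:

	struct tcm_area area;
	s32 res;

	/* align is already expressed in slots by the time this layer is
	 * reached; slot_bytes tells the placement code how wide a slot is */
	res = tcm_reserve_2d(tcm, w, h, align, -1, slot_bytes, &area);
	if (res)
		return res;   /* nothing was reserved */
	/* on success, area describes the reserved 2D region */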
@@ -101,9 +101,6 @@ struct drm_omap_gem_info {
#define DRM_OMAP_GET_PARAM 0x00
#define DRM_OMAP_SET_PARAM 0x01
-/* placeholder for plugin-api
-#define DRM_OMAP_GET_BASE 0x02
-*/
#define DRM_OMAP_GEM_NEW 0x03
#define DRM_OMAP_GEM_CPU_PREP 0x04
#define DRM_OMAP_GEM_CPU_FINI 0x05
@@ -112,9 +109,6 @@ struct drm_omap_gem_info {
#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
-/* placeholder for plugin-api
-#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
-*/
#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
......