Commit f7de1545, authored by Jordan Crouse, committed by Rob Clark

drm/msm: Add per-instance submit queues

Currently the behavior of a command stream is specified by the user
application at submission time; the application is expected to internally
maintain the settings for each 'context' or 'rendering queue' and pass
the correct ones with each submission.

This works for simple cases, but as applications become more complex
we will want to set context-specific flags and perform permission
checks that allow certain contexts to enable additional privileges.

Add kernel-side submit queues to be analogous to 'contexts' or
'rendering queues' on the application side. Each file descriptor
instance will maintain its own list of queues. Queues cannot be
shared between file descriptors.

For backwards compatibility, context id '0' is defined as a default
context with no priority and no special flags. This is intended to be
the usual configuration for the vast majority of applications, so that
a garden-variety application functions correctly without ever creating
a queue. Only applications that need the specific benefits of different
queues have to create one (see the usage sketch below).
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Parent 0033e1b5
drivers/gpu/drm/msm/Makefile
@@ -57,7 +57,8 @@ msm-y := \
 	msm_iommu.o \
 	msm_perf.o \
 	msm_rd.o \
-	msm_ringbuffer.o
+	msm_ringbuffer.o \
+	msm_submitqueue.o

 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
drivers/gpu/drm/msm/msm_drv.c
@@ -29,9 +29,12 @@
  * - 1.0.0 - initial interface
  * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
  * - 1.2.0 - adds explicit fence support for submit ioctl
+ * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
+ *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
+ *           MSM_GEM_INFO ioctl.
  */
 #define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 2
+#define MSM_VERSION_MINOR 3
 #define MSM_VERSION_PATCHLEVEL 0

 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -504,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
 	mutex_unlock(&init_lock);
 }

-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static int context_init(struct drm_file *file)
 {
 	struct msm_file_private *ctx;

-	/* For now, load gpu on open.. to avoid the requirement of having
-	 * firmware in the initrd.
-	 */
-	load_gpu(dev);
-
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
 		return -ENOMEM;

+	msm_submitqueue_init(ctx);
+
 	file->driver_priv = ctx;

 	return 0;
 }

+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+	/* For now, load gpu on open.. to avoid the requirement of having
+	 * firmware in the initrd.
+	 */
+	load_gpu(dev);
+
+	return context_init(file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+	msm_submitqueue_close(ctx);
+	kfree(ctx);
+}
+
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -532,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 		priv->lastctx = NULL;
 	mutex_unlock(&dev->struct_mutex);

-	kfree(ctx);
+	context_close(ctx);
 }

 static void msm_lastclose(struct drm_device *dev)
@@ -777,6 +793,28 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
 	return ret;
 }

+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_submitqueue *args = data;
+
+	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+		return -EINVAL;
+
+	return msm_submitqueue_create(file->driver_priv, args->prio,
+		args->flags, &args->id);
+}
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	u32 id = *(u32 *) data;
+
+	return msm_submitqueue_remove(file->driver_priv, id);
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -786,6 +824,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
 };

 static const struct vm_operations_struct vm_ops = {
drivers/gpu/drm/msm/msm_drv.h
@@ -56,11 +56,9 @@ struct msm_gem_address_space;
 struct msm_gem_vma;

 struct msm_file_private {
-	/* currently we don't do anything useful with this.. but when
-	 * per-context address spaces are supported we'd keep track of
-	 * the context's page-tables here.
-	 */
-	int dummy;
+	rwlock_t queuelock;
+	struct list_head submitqueues;
+	int queueid;
 };

 enum msm_mdp_plane_property {
@@ -319,6 +317,18 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);

+struct msm_gpu_submitqueue;
+int msm_submitqueue_init(struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+		u32 id);
+int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio,
+		u32 flags, u32 *id);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+void msm_submitqueue_destroy(struct kref *kref);
+
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
drivers/gpu/drm/msm/msm_gem.h
@@ -142,6 +142,7 @@ struct msm_gem_submit {
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
 	struct dma_fence *fence;
+	struct msm_gpu_submitqueue *queue;
 	struct pid *pid; /* submitting process */
 	bool valid; /* true if no cmdstream patching needed */
 	unsigned int nr_cmds;
drivers/gpu/drm/msm/msm_gem_submit.c
@@ -31,7 +31,8 @@
 #define BO_PINNED 0x2000

 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
+		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
+		uint32_t nr_bos, uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
 	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
@@ -49,6 +50,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	submit->fence = NULL;
 	submit->pid = get_pid(task_pid(current));
 	submit->cmd = (void *)&submit->bos[nr_bos];
+	submit->queue = queue;

 	/* initially, until copy_from_user() and bo lookup succeeds: */
 	submit->nr_bos = 0;
@@ -66,6 +68,8 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
 	dma_fence_put(submit->fence);
 	list_del(&submit->node);
 	put_pid(submit->pid);
+	msm_submitqueue_put(submit->queue);
+
 	kfree(submit);
 }
@@ -405,6 +409,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct msm_gpu *gpu = priv->gpu;
 	struct dma_fence *in_fence = NULL;
 	struct sync_file *sync_file = NULL;
+	struct msm_gpu_submitqueue *queue;
 	int out_fence_fd = -1;
 	unsigned i;
 	int ret;
@@ -421,6 +426,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
 		return -EINVAL;

+	queue = msm_submitqueue_get(ctx, args->queueid);
+	if (!queue)
+		return -ENOENT;
+
 	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
 		in_fence = sync_file_get_fence(args->fence_fd);
@@ -451,7 +460,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	}

 	priv->struct_mutex_task = current;

-	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
+	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto out_unlock;
@@ -535,6 +544,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	submit->nr_cmds = i;

 	submit->fence = msm_fence_alloc(gpu->fctx);
 	if (IS_ERR(submit->fence)) {
 		ret = PTR_ERR(submit->fence);
 		submit->fence = NULL;
drivers/gpu/drm/msm/msm_gpu.h
@@ -150,6 +150,15 @@ struct msm_gpu_perfcntr {
 	const char *name;
 };

+struct msm_gpu_submitqueue {
+	int id;
+	u32 flags;
+	u32 prio;
+	int faults;
+	struct list_head node;
+	struct kref ref;
+};
+
 static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
 {
 	msm_writel(data, gpu->mmio + (reg << 2));
@@ -223,4 +232,10 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
 void __init adreno_register(void);
 void __exit adreno_unregister(void);

+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+	if (queue)
+		kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
 #endif /* __MSM_GPU_H__ */

drivers/gpu/drm/msm/msm_submitqueue.c (new file)
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kref.h>
#include "msm_gpu.h"

void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	kfree(queue);
}

struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
		msm_submitqueue_put(entry);
}

int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags,
		u32 *id)
{
	struct msm_gpu_submitqueue *queue;

	if (!ctx)
		return -ENODEV;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->prio = prio;

	write_lock(&ctx->queuelock);

	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

int msm_submitqueue_init(struct msm_file_private *ctx)
{
	if (!ctx)
		return 0;

	INIT_LIST_HEAD(&ctx->submitqueues);

	rwlock_init(&ctx->queuelock);

	return msm_submitqueue_create(ctx, 2, 0, NULL);
}

int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}
include/uapi/drm/msm_drm.h
@@ -218,6 +218,7 @@ struct drm_msm_gem_submit {
 	__u64 bos; /* in, ptr to array of submit_bo's */
 	__u64 cmds; /* in, ptr to array of submit_cmd's */
 	__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
+	__u32 queueid; /* in, submitqueue id */
 };

 /* The normal way to synchronize with the GPU is just to CPU_PREP on
@@ -254,6 +255,20 @@ struct drm_msm_gem_madvise {
 	__u32 retained; /* out, whether backing store still exists */
 };

+/*
+ * Draw queues allow the user to set specific submission parameters. Command
+ * submissions specify a specific submitqueue to use. ID 0 is reserved for
+ * backwards compatibility as a "default" submitqueue.
+ */
+
+#define MSM_SUBMITQUEUE_FLAGS (0)
+
+struct drm_msm_submitqueue {
+	__u32 flags; /* in, MSM_SUBMITQUEUE_x */
+	__u32 prio; /* in, Priority level */
+	__u32 id; /* out, identifier */
+};
+
 #define DRM_MSM_GET_PARAM 0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM 0x01
@@ -265,6 +280,11 @@ struct drm_msm_gem_madvise {
 #define DRM_MSM_GEM_SUBMIT 0x06
 #define DRM_MSM_WAIT_FENCE 0x07
 #define DRM_MSM_GEM_MADVISE 0x08
+/* placeholder:
+#define DRM_MSM_GEM_SVM_NEW 0x09
+ */
+#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
+#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B

 #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -274,6 +294,8 @@ struct drm_msm_gem_madvise {
 #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
 #define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)

 #if defined(__cplusplus)
 }