/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include <drm/drmP.h>
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

/*
 * Allocate and initialize a udl GEM object of @size bytes.
 * Returns NULL on allocation or GEM-init failure.
 */
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;

	if (drm_gem_object_init(dev, &bo->base, size)) {
		kfree(bo);
		return NULL;
	}

	/* Native objects default to cacheable mappings; imported
	 * (PRIME) objects are switched to write-combine at import. */
	bo->flags = UDL_BO_CACHEABLE;
	return bo;
}

/*
 * Allocate a GEM object of @size bytes (rounded up to a whole page)
 * and create a userspace handle for it in @file's handle namespace.
 * On success the handle holds the only reference; on failure the
 * object is destroyed.  Returns 0 or a negative error code.
 */
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		/* no handle reference exists yet, so free the object */
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	/* drop the allocation reference; the handle keeps the object alive */
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}

60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76
static void update_vm_cache_attr(struct udl_gem_object *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable as default. */
	if (obj->flags & UDL_BO_CACHEABLE) {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	} else if (obj->flags & UDL_BO_WC) {
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	}
}

D
Dave Airlie 已提交
77 78 79 80
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
81
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
D
Dave Airlie 已提交
82 83 84 85 86
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}

87 88 89 90 91 92 93 94 95 96 97
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

98 99
	update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);

100 101 102
	return ret;
}

D
Dave Airlie 已提交
103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

130
static int udl_gem_get_pages(struct udl_gem_object *obj)
D
Dave Airlie 已提交
131
{
132
	struct page **pages;
D
Dave Airlie 已提交
133 134 135 136

	if (obj->pages)
		return 0;

137
	pages = drm_gem_get_pages(&obj->base);
138 139
	if (IS_ERR(pages))
		return PTR_ERR(pages);
D
Dave Airlie 已提交
140

141
	obj->pages = pages;
D
Dave Airlie 已提交
142 143 144 145 146 147

	return 0;
}

static void udl_gem_put_pages(struct udl_gem_object *obj)
{
148 149 150 151 152 153
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

154
	drm_gem_put_pages(&obj->base, obj->pages, false, false);
D
Dave Airlie 已提交
155 156 157 158 159 160 161 162
	obj->pages = NULL;
}

/*
 * Map the whole object into kernel address space at obj->vmapping.
 * Imported buffers are mapped through dma-buf; native buffers get
 * their pages pinned and vmap'ed.  Returns 0 or -ENOMEM / -errno.
 */
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int ret;

	if (obj->base.import_attach) {
		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		return obj->vmapping ? 0 : -ENOMEM;
	}

	ret = udl_gem_get_pages(obj);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0,
			     PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}

void udl_gem_vunmap(struct udl_gem_object *obj)
{
182 183 184 185 186
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		return;
	}

D
Dave Airlie 已提交
187 188 189 190 191 192 193 194 195 196 197 198 199
	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}

void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

200
	if (gem_obj->import_attach) {
201
		drm_prime_gem_destroy(gem_obj, obj->sg);
202 203
		put_device(gem_obj->dev->dev);
	}
204

D
Dave Airlie 已提交
205 206 207
	if (obj->pages)
		udl_gem_put_pages(obj);

208
	drm_gem_free_mmap_offset(gem_obj);
D
Dave Airlie 已提交
209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
}

/* the dumb interface doesn't work with the GEM straight MMAP
   interface, it expects to do MMAP on the drm fd, like normal */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	struct udl_gem_object *bo;
	int ret;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}
	bo = to_udl_bo(obj);

	/* ensure pages and a fake mmap offset exist before reporting it */
	ret = udl_gem_get_pages(bo);
	if (ret)
		goto out;
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&bo->base.vma_node);

out:
	drm_gem_object_unreference(&bo->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
D
Dave Airlie 已提交
243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280

static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}

struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	/* need to attach */
281
	get_device(dev->dev);
D
Dave Airlie 已提交
282
	attach = dma_buf_attach(dma_buf, dev->dev);
283 284
	if (IS_ERR(attach)) {
		put_device(dev->dev);
285
		return ERR_CAST(attach);
286
	}
D
Dave Airlie 已提交
287

288 289
	get_dma_buf(dma_buf);

D
Dave Airlie 已提交
290 291 292 293 294 295 296 297 298 299 300 301
	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret) {
		goto fail_unmap;
	}

	uobj->base.import_attach = attach;
302
	uobj->flags = UDL_BO_WC;
D
Dave Airlie 已提交
303 304 305 306 307 308 309

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
310
	dma_buf_put(dma_buf);
311
	put_device(dev->dev);
D
Dave Airlie 已提交
312 313
	return ERR_PTR(ret);
}