/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

12 13
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
14 15

#include "exynos_drm_drv.h"
I
Inki Dae 已提交
16
#include "exynos_drm_gem.h"
17
#include "exynos_drm_buf.h"
18
#include "exynos_drm_iommu.h"
19 20

static int lowlevel_buffer_allocate(struct drm_device *dev,
21
		unsigned int flags, struct exynos_drm_gem_buf *buf)
22
{
23
	int ret = 0;
24
	enum dma_attr attr;
25
	unsigned int nr_pages;
26 27 28 29 30 31

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

32 33
	init_dma_attrs(&buf->dma_attrs);

34 35 36 37 38
	/*
	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
	 * region will be allocated else physically contiguous
	 * as possible.
	 */
39
	if (!(flags & EXYNOS_BO_NONCONTIG))
40 41 42 43 44 45 46
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
	 * else cachable mapping.
	 */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
47
		attr = DMA_ATTR_WRITE_COMBINE;
48 49
	else
		attr = DMA_ATTR_NON_CONSISTENT;
50 51

	dma_set_attr(attr, &buf->dma_attrs);
52
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
53

54 55 56 57 58 59
	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

60
		buf->pages = drm_calloc_large(nr_pages, sizeof(struct page));
61 62 63 64 65 66 67 68 69 70
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
71
			drm_free_large(buf->pages);
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
90 91
	}

92
	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
93
	if (!buf->sgt) {
94
		DRM_ERROR("failed to get sg table.\n");
95 96
		ret = -ENOMEM;
		goto err_free_attrs;
97 98
	}

99
	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
100 101 102 103
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;
104 105

err_free_attrs:
106
	dma_free_attrs(dev->dev, buf->size, buf->pages,
107 108
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;
109

110
	if (!is_drm_iommu_supported(dev))
111
		drm_free_large(buf->pages);
112

113
	return ret;
114 115 116
}

static void lowlevel_buffer_deallocate(struct drm_device *dev,
117
		unsigned int flags, struct exynos_drm_gem_buf *buf)
118
{
119 120 121 122 123
	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

124
	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
125 126 127 128 129 130 131 132
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

133 134 135
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
136
		drm_free_large(buf->pages);
137 138
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
139
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
140

141
	buf->dma_addr = (dma_addr_t)NULL;
142 143
}

144 145
struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
						unsigned int size)
146
{
I
Inki Dae 已提交
147
	struct exynos_drm_gem_buf *buffer;
148

I
Inki Dae 已提交
149
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);
150

I
Inki Dae 已提交
151 152 153
	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
J
Joonyoung Shim 已提交
154
		return NULL;
155 156
	}

I
Inki Dae 已提交
157 158
	buffer->size = size;
	return buffer;
159 160
}

161 162
/*
 * Destroy a buffer object created by exynos_drm_init_buf().
 *
 * @dev:	DRM device (unused here, kept for API symmetry)
 * @buffer:	buffer object to free; NULL is tolerated
 *
 * Frees only the bookkeeping object; the backing memory must have
 * been released beforehand via exynos_drm_free_buf().
 */
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	/*
	 * NOTE: the original also did "buffer = NULL;" after kfree(),
	 * but assigning to a by-value parameter has no effect on the
	 * caller's pointer — dropped as a dead store.
	 */
	kfree(buffer);
}

173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192
/*
 * Allocate backing memory for @buf according to @flags.
 *
 * @dev:	DRM device the buffer belongs to
 * @buf:	buffer object created by exynos_drm_init_buf()
 * @flags:	EXYNOS_BO_* allocation flags
 *
 * Returns 0 on success or a negative error code. The original
 * collapsed every failure to -ENOMEM; propagate the callee's actual
 * error code instead so future error paths aren't masked.
 */
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * allocate memory region and set the memory information
	 * to vaddr and dma_addr of a buffer object.
	 */
	return lowlevel_buffer_allocate(dev, flags, buf);
}

/*
 * Release the backing memory of @buffer.
 *
 * @dev:	DRM device the buffer belongs to
 * @flags:	EXYNOS_BO_* flags the buffer was allocated with
 * @buffer:	buffer object whose storage should be freed
 *
 * Counterpart of exynos_drm_alloc_buf(); the bookkeeping object
 * itself is destroyed separately by exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}