/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * if EXYNOS_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the region is allocated
	 * as physically contiguous as possible.
	 */
	if (flags & EXYNOS_BO_CONTIG)
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a write-combined
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

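	/*
	 * Two allocation paths: without an IOMMU the buffer comes from one
	 * contiguous DMA region and the page array is built by hand below;
	 * with an IOMMU, the value returned by dma_alloc_attrs() is kept in
	 * buf->pages and later passed back to dma_free_attrs().
	 */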
	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

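		/*
		 * The region is physically contiguous here, so the page
		 * array can be filled by stepping through it one PAGE_SIZE
		 * at a time; phys_to_page() relies on the DMA address being
		 * the physical address when no IOMMU is involved.
		 */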
		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

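	/* describe the backing pages with a scatter-gather table */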
	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;

	return ret;
}

static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

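	/*
	 * Release the buffer the same way it was allocated: without an
	 * IOMMU free the CPU mapping and the hand-built page array, with
	 * an IOMMU hand buf->pages back to dma_free_attrs().
	 */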
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;
}

struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
						unsigned int size)
{
	struct exynos_drm_gem_buf *buffer;

	DRM_DEBUG_KMS("%s.\n", __FILE__);
	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
		return NULL;
	}

	buffer->size = size;
	return buffer;
}

void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
	buffer = NULL;
}

int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{

	/*
	 * allocate memory region and set the memory information
	 * to vaddr and dma_addr of a buffer object.
	 */
	if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
		return -ENOMEM;

	return 0;
}

void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{

	lowlevel_buffer_deallocate(dev, flags, buffer);
}